code stringlengths 87 55.2k | code_codestyle int64 0 349 | style_context stringlengths 135 49.1k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Union[str, Any] = {
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class _lowerCamelCase( _a ):
lowercase_ : int = """blip_2_vision_model"""
def __init__( self, lowerCamelCase=14_08, lowerCamelCase=61_44, lowerCamelCase=39, lowerCamelCase=16, lowerCamelCase=2_24, lowerCamelCase=14, lowerCamelCase="gelu", lowerCamelCase=0.0_0_0_0_1, lowerCamelCase=0.0, lowerCamelCase=1E-10, lowerCamelCase=True, **lowerCamelCase, ) -> str:
"""simple docstring"""
super().__init__(**lowerCamelCase)
_lowercase : Tuple = hidden_size
_lowercase : Tuple = intermediate_size
_lowercase : Optional[int] = num_hidden_layers
_lowercase : Tuple = num_attention_heads
_lowercase : List[Any] = patch_size
_lowercase : List[str] = image_size
_lowercase : Dict = initializer_range
_lowercase : str = attention_dropout
_lowercase : Tuple = layer_norm_eps
_lowercase : List[Any] = hidden_act
_lowercase : Optional[Any] = qkv_bias
@classmethod
def UpperCamelCase ( cls, lowerCamelCase, **lowerCamelCase) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(lowerCamelCase)
_lowercase , _lowercase : Dict = cls.get_config_dict(lowerCamelCase, **lowerCamelCase)
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type') == "blip-2":
_lowercase : Tuple = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
return cls.from_dict(lowerCamelCase, **lowerCamelCase)
class _lowerCamelCase( _a ):
lowercase_ : List[Any] = """blip_2_qformer"""
def __init__( self, lowerCamelCase=3_05_22, lowerCamelCase=7_68, lowerCamelCase=12, lowerCamelCase=12, lowerCamelCase=30_72, lowerCamelCase="gelu", lowerCamelCase=0.1, lowerCamelCase=0.1, lowerCamelCase=5_12, lowerCamelCase=0.0_2, lowerCamelCase=1E-12, lowerCamelCase=0, lowerCamelCase="absolute", lowerCamelCase=2, lowerCamelCase=14_08, **lowerCamelCase, ) -> Tuple:
"""simple docstring"""
super().__init__(pad_token_id=lowerCamelCase, **lowerCamelCase)
_lowercase : Any = vocab_size
_lowercase : int = hidden_size
_lowercase : List[str] = num_hidden_layers
_lowercase : List[str] = num_attention_heads
_lowercase : Dict = hidden_act
_lowercase : str = intermediate_size
_lowercase : Optional[int] = hidden_dropout_prob
_lowercase : str = attention_probs_dropout_prob
_lowercase : int = max_position_embeddings
_lowercase : Dict = initializer_range
_lowercase : Optional[int] = layer_norm_eps
_lowercase : Tuple = position_embedding_type
_lowercase : Tuple = cross_attention_frequency
_lowercase : List[Any] = encoder_hidden_size
@classmethod
def UpperCamelCase ( cls, lowerCamelCase, **lowerCamelCase) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(lowerCamelCase)
_lowercase , _lowercase : int = cls.get_config_dict(lowerCamelCase, **lowerCamelCase)
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type') == "blip-2":
_lowercase : str = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
return cls.from_dict(lowerCamelCase, **lowerCamelCase)
class _lowerCamelCase( _a ):
lowercase_ : Dict = """blip-2"""
lowercase_ : Union[str, Any] = True
def __init__( self, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=32, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**lowerCamelCase)
if vision_config is None:
_lowercase : List[Any] = {}
logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.')
if qformer_config is None:
_lowercase : str = {}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.')
if text_config is None:
_lowercase : Tuple = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).')
_lowercase : Tuple = BlipaVisionConfig(**lowerCamelCase)
_lowercase : List[Any] = BlipaQFormerConfig(**lowerCamelCase)
_lowercase : Tuple = text_config['model_type'] if 'model_type' in text_config else 'opt'
_lowercase : Optional[Any] = CONFIG_MAPPING[text_model_type](**lowerCamelCase)
_lowercase : Optional[int] = self.text_config.tie_word_embeddings
_lowercase : int = self.text_config.is_encoder_decoder
_lowercase : List[Any] = num_query_tokens
_lowercase : Dict = self.vision_config.hidden_size
_lowercase : Any = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_lowercase : Union[str, Any] = 1.0
_lowercase : List[str] = 0.0_2
@classmethod
def UpperCamelCase ( cls, lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase, ) -> Union[str, Any]:
"""simple docstring"""
return cls(
vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **lowerCamelCase, )
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Tuple = copy.deepcopy(self.__dict__)
_lowercase : Union[str, Any] = self.vision_config.to_dict()
_lowercase : Optional[Any] = self.qformer_config.to_dict()
_lowercase : Dict = self.text_config.to_dict()
_lowercase : int = self.__class__.model_type
return output
| 21 |
"""simple docstring"""
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self: Tuple , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: List[str]=13 , _SCREAMING_SNAKE_CASE: Tuple=7 , _SCREAMING_SNAKE_CASE: int=True , _SCREAMING_SNAKE_CASE: Optional[Any]=True , _SCREAMING_SNAKE_CASE: str=False , _SCREAMING_SNAKE_CASE: Optional[Any]=True , _SCREAMING_SNAKE_CASE: int=99 , _SCREAMING_SNAKE_CASE: int=32 , _SCREAMING_SNAKE_CASE: List[str]=5 , _SCREAMING_SNAKE_CASE: Union[str, Any]=4 , _SCREAMING_SNAKE_CASE: int=64 , _SCREAMING_SNAKE_CASE: List[str]="gelu" , _SCREAMING_SNAKE_CASE: str=0.1 , _SCREAMING_SNAKE_CASE: Any=0.1 , _SCREAMING_SNAKE_CASE: Optional[int]=512 , _SCREAMING_SNAKE_CASE: Tuple=16 , _SCREAMING_SNAKE_CASE: Any=2 , _SCREAMING_SNAKE_CASE: List[str]=0.02 , _SCREAMING_SNAKE_CASE: Tuple=3 , _SCREAMING_SNAKE_CASE: Optional[Any]=4 , _SCREAMING_SNAKE_CASE: int=None , _SCREAMING_SNAKE_CASE: int=2 , _SCREAMING_SNAKE_CASE: str=2 , _SCREAMING_SNAKE_CASE: Union[str, Any]=2 , _SCREAMING_SNAKE_CASE: List[Any]=2 , _SCREAMING_SNAKE_CASE: int=4 , _SCREAMING_SNAKE_CASE: List[str]=1 , ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : List[str] = parent
__lowerCAmelCase : Optional[Any] = batch_size
__lowerCAmelCase : Union[str, Any] = seq_length
__lowerCAmelCase : Optional[Any] = is_training
__lowerCAmelCase : Optional[int] = use_input_mask
__lowerCAmelCase : Dict = use_token_type_ids
__lowerCAmelCase : Dict = use_labels
__lowerCAmelCase : Dict = vocab_size
__lowerCAmelCase : Tuple = hidden_size
__lowerCAmelCase : List[Any] = num_hidden_layers
__lowerCAmelCase : Union[str, Any] = num_attention_heads
__lowerCAmelCase : Tuple = intermediate_size
__lowerCAmelCase : List[Any] = hidden_act
__lowerCAmelCase : Optional[Any] = hidden_dropout_prob
__lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
__lowerCAmelCase : Optional[int] = max_position_embeddings
__lowerCAmelCase : Union[str, Any] = type_vocab_size
__lowerCAmelCase : Optional[int] = type_sequence_label_size
__lowerCAmelCase : Dict = initializer_range
__lowerCAmelCase : Tuple = num_labels
__lowerCAmelCase : Optional[Any] = num_choices
__lowerCAmelCase : Union[str, Any] = scope
__lowerCAmelCase : Optional[Any] = q_groups
__lowerCAmelCase : Optional[int] = k_groups
__lowerCAmelCase : Any = v_groups
__lowerCAmelCase : int = post_attention_groups
__lowerCAmelCase : List[str] = intermediate_groups
__lowerCAmelCase : Optional[Any] = output_groups
def _SCREAMING_SNAKE_CASE ( self: Dict) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__lowerCAmelCase : Union[str, Any] = None
if self.use_input_mask:
__lowerCAmelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length])
__lowerCAmelCase : Optional[int] = None
__lowerCAmelCase : List[Any] = None
__lowerCAmelCase : str = None
if self.use_labels:
__lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__lowerCAmelCase : str = ids_tensor([self.batch_size] , self.num_choices)
__lowerCAmelCase : Any = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> int:
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Tuple) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : List[Any] = SqueezeBertModel(config=_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
__lowerCAmelCase : List[Any] = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Tuple) -> Dict:
"""simple docstring"""
__lowerCAmelCase : int = SqueezeBertForMaskedLM(config=_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
__lowerCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _SCREAMING_SNAKE_CASE ( self: Optional[int] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Union[str, Any]) -> int:
"""simple docstring"""
__lowerCAmelCase : str = SqueezeBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
__lowerCAmelCase : Union[str, Any] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: Tuple) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = self.num_labels
__lowerCAmelCase : Union[str, Any] = SqueezeBertForSequenceClassification(_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
__lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[int]) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : Dict = self.num_labels
__lowerCAmelCase : Optional[int] = SqueezeBertForTokenClassification(config=_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
__lowerCAmelCase : List[str] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: int) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : List[str] = self.num_choices
__lowerCAmelCase : str = SqueezeBertForMultipleChoice(config=_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
__lowerCAmelCase : int = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__lowerCAmelCase : Union[str, Any] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__lowerCAmelCase : str = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _SCREAMING_SNAKE_CASE ( self: str) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs()
((__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase)) : Union[str, Any] = config_and_inputs
__lowerCAmelCase : int = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
SCREAMING_SNAKE_CASE = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
def _SCREAMING_SNAKE_CASE ( self: Dict) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Any = SqueezeBertModelTester(self)
__lowerCAmelCase : Optional[int] = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , dim=37)
def _SCREAMING_SNAKE_CASE ( self: Any) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> Any:
"""simple docstring"""
__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> int:
"""simple docstring"""
__lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Any) -> int:
"""simple docstring"""
__lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> str:
"""simple docstring"""
__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*_SCREAMING_SNAKE_CASE)
@slow
def _SCREAMING_SNAKE_CASE ( self: Any) -> Dict:
"""simple docstring"""
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Optional[Any] = SqueezeBertModel.from_pretrained(_SCREAMING_SNAKE_CASE)
self.assertIsNotNone(_SCREAMING_SNAKE_CASE)
@require_sentencepiece
@require_tokenizers
@require_torch
class A__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self: int) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
__lowerCAmelCase : List[Any] = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]])
__lowerCAmelCase : List[Any] = model(_SCREAMING_SNAKE_CASE)[0]
__lowerCAmelCase : Any = torch.Size((1, 3))
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = torch.tensor([[0.6401, -0.0349, -0.6041]])
self.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-4)) | 269 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
__SCREAMING_SNAKE_CASE :Any = None
__SCREAMING_SNAKE_CASE :Tuple = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE :Dict = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
__SCREAMING_SNAKE_CASE :Optional[Any] = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json''',
},
}
__SCREAMING_SNAKE_CASE :str = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
__SCREAMING_SNAKE_CASE :str = '''▁'''
# Segments (not really needed)
__SCREAMING_SNAKE_CASE :Union[str, Any] = 0
__SCREAMING_SNAKE_CASE :List[str] = 1
__SCREAMING_SNAKE_CASE :Tuple = 2
__SCREAMING_SNAKE_CASE :Any = 3
__SCREAMING_SNAKE_CASE :Union[str, Any] = 4
class A_ ( lowerCAmelCase_ ):
_lowerCamelCase : Union[str, Any] = VOCAB_FILES_NAMES
_lowerCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase : Optional[Any] = """left"""
_lowerCamelCase : Any = XLNetTokenizer
def __init__( self : List[str] , snake_case_ : Union[str, Any]=None , snake_case_ : str=None , snake_case_ : Optional[Any]=False , snake_case_ : List[Any]=True , snake_case_ : str=False , snake_case_ : str="<s>" , snake_case_ : Any="</s>" , snake_case_ : int="<unk>" , snake_case_ : str="<sep>" , snake_case_ : Tuple="<pad>" , snake_case_ : Any="<cls>" , snake_case_ : Union[str, Any]="<mask>" , snake_case_ : int=["<eop>", "<eod>"] , **snake_case_ : List[Any] , ):
# Mask token behave like a normal word, i.e. include the space before it
_UpperCAmelCase = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else mask_token
super().__init__(
vocab_file=snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , remove_space=snake_case_ , keep_accents=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , additional_special_tokens=snake_case_ , **snake_case_ , )
_UpperCAmelCase = 3
_UpperCAmelCase = do_lower_case
_UpperCAmelCase = remove_space
_UpperCAmelCase = keep_accents
_UpperCAmelCase = vocab_file
_UpperCAmelCase = False if not self.vocab_file else True
def lowercase ( self : Optional[int] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def lowercase ( self : Tuple , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def lowercase ( self : str , snake_case_ : str , snake_case_ : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(snake_case_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_UpperCAmelCase = os.path.join(
snake_case_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ):
copyfile(self.vocab_file , snake_case_ )
return (out_vocab_file,)
| 22 |
"""simple docstring"""
import os
from math import logaa
def _lowercase ( __snake_case = "base_exp.txt" ) -> int:
__lowerCAmelCase : float = 0
__lowerCAmelCase : Any = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(__snake_case ) ,__snake_case ) ) ):
__lowerCAmelCase , __lowerCAmelCase : List[str] = list(map(__snake_case ,line.split("," ) ) )
if x * logaa(__snake_case ) > largest:
__lowerCAmelCase : Tuple = x * logaa(__snake_case )
__lowerCAmelCase : Optional[Any] = i + 1
return result
if __name__ == "__main__":
print(solution()) | 269 | 0 |
'''simple docstring'''
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
UpperCamelCase__: List[str] = logging.get_logger(__name__)
UpperCamelCase__: Any = {
"tensor(bool)": np.bool_,
"tensor(int8)": np.inta,
"tensor(uint8)": np.uinta,
"tensor(int16)": np.intaa,
"tensor(uint16)": np.uintaa,
"tensor(int32)": np.intaa,
"tensor(uint32)": np.uintaa,
"tensor(int64)": np.intaa,
"tensor(uint64)": np.uintaa,
"tensor(float16)": np.floataa,
"tensor(float)": np.floataa,
"tensor(double)": np.floataa,
}
class SCREAMING_SNAKE_CASE:
"""simple docstring"""
def __init__( self : Optional[int] , __snake_case : Optional[int]=None , **__snake_case : str ) -> Union[str, Any]:
logger.info('''`diffusers.OnnxRuntimeModel` is experimental and might change in the future.''' )
UpperCAmelCase : Union[str, Any] = model
UpperCAmelCase : Optional[Any] = kwargs.get('''model_save_dir''' , __snake_case )
UpperCAmelCase : Optional[int] = kwargs.get('''latest_model_name''' , __snake_case )
def __call__( self : Optional[Any] , **__snake_case : Optional[Any] ) -> List[Any]:
UpperCAmelCase : Optional[int] = {k: np.array(__snake_case ) for k, v in kwargs.items()}
return self.model.run(__snake_case , __snake_case )
@staticmethod
def A ( __snake_case : Union[str, Path] , __snake_case : List[str]=None , __snake_case : Optional[int]=None ) -> Optional[int]:
if provider is None:
logger.info('''No onnxruntime provider specified, using CPUExecutionProvider''' )
UpperCAmelCase : List[Any] = '''CPUExecutionProvider'''
return ort.InferenceSession(__snake_case , providers=[provider] , sess_options=__snake_case )
def A ( self : Optional[Any] , __snake_case : Union[str, Path] , __snake_case : Optional[str] = None , **__snake_case : int ) -> str:
UpperCAmelCase : List[str] = file_name if file_name is not None else ONNX_WEIGHTS_NAME
UpperCAmelCase : Optional[int] = self.model_save_dir.joinpath(self.latest_model_name )
UpperCAmelCase : int = Path(__snake_case ).joinpath(__snake_case )
try:
shutil.copyfile(__snake_case , __snake_case )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
UpperCAmelCase : Tuple = self.model_save_dir.joinpath(__snake_case )
if src_path.exists():
UpperCAmelCase : List[Any] = Path(__snake_case ).joinpath(__snake_case )
try:
shutil.copyfile(__snake_case , __snake_case )
except shutil.SameFileError:
pass
def A ( self : Optional[int] , __snake_case : Union[str, os.PathLike] , **__snake_case : int , ) -> List[Any]:
if os.path.isfile(__snake_case ):
logger.error(F"""Provided path ({save_directory}) should be a directory, not a file""" )
return
os.makedirs(__snake_case , exist_ok=__snake_case )
# saving model weights/files
self._save_pretrained(__snake_case , **__snake_case )
@classmethod
def A ( cls : Optional[int] , __snake_case : Union[str, Path] , __snake_case : Optional[Union[bool, str, None]] = None , __snake_case : Optional[Union[str, None]] = None , __snake_case : bool = False , __snake_case : Optional[str] = None , __snake_case : Optional[str] = None , __snake_case : Optional[str] = None , __snake_case : Optional["ort.SessionOptions"] = None , **__snake_case : Any , ) -> List[str]:
UpperCAmelCase : Dict = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(__snake_case ):
UpperCAmelCase : Optional[Any] = OnnxRuntimeModel.load_model(
os.path.join(__snake_case , __snake_case ) , provider=__snake_case , sess_options=__snake_case )
UpperCAmelCase : List[Any] = Path(__snake_case )
# load model from hub
else:
# download model
UpperCAmelCase : List[str] = hf_hub_download(
repo_id=__snake_case , filename=__snake_case , use_auth_token=__snake_case , revision=__snake_case , cache_dir=__snake_case , force_download=__snake_case , )
UpperCAmelCase : int = Path(__snake_case ).parent
UpperCAmelCase : Union[str, Any] = Path(__snake_case ).name
UpperCAmelCase : Optional[Any] = OnnxRuntimeModel.load_model(__snake_case , provider=__snake_case , sess_options=__snake_case )
return cls(model=__snake_case , **__snake_case )
@classmethod
def A ( cls : Any , __snake_case : Union[str, Path] , __snake_case : bool = True , __snake_case : Optional[str] = None , __snake_case : Optional[str] = None , **__snake_case : Optional[int] , ) -> str:
UpperCAmelCase : str = None
if len(str(__snake_case ).split('''@''' ) ) == 2:
UpperCAmelCase , UpperCAmelCase : Optional[Any] = model_id.split('''@''' )
return cls._from_pretrained(
model_id=__snake_case , revision=__snake_case , cache_dir=__snake_case , force_download=__snake_case , use_auth_token=__snake_case , **__snake_case , )
| 23 |
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def _lowercase ( __snake_case ) -> List[str]:
if isinstance(__snake_case ,collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class VisionTextDualEncoderMixin:
    """Shared checks for Flax VisionTextDualEncoder model tests.

    Subclasses provide the three hooks below (model builders and input
    preparation); this mixin provides the actual `test_*` methods.  The
    original obfuscated version gave every method the same name and used
    undefined locals; names here are restored to match the internal call
    sites (`self.get_vision_text_model`, `self.check_pt_flax_equivalence`, ...).
    """

    def get_vision_text_model(self, config, text_config):
        """Hook: return (vision_model, text_model) built from the two configs."""
        pass

    def prepare_config_and_inputs(self):
        """Hook: return a dict of configs and model inputs for the checks below."""
        pass

    def get_pretrained_model_and_inputs(self):
        """Hook: return (model, inputs) loaded from pretrained checkpoints."""
        pass

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = FlaxVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()

        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}

        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()

        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)

        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)

        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()

        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)

    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")

        inputs_dict = config_inputs_dict

        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    """ViT + BERT instantiation of the dual-encoder test mixin."""

    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    """CLIP vision encoder + BERT instantiation of the dual-encoder test mixin."""

    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class VisionTextDualEncoderIntegrationTest(unittest.TestCase):
    """Slow end-to-end check against the public clip-italian checkpoint."""

    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.228_4727, 0.310_4122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    """Return the boolean value of environment variable *key*, or *default* if unset.

    Raises ValueError when the variable is set to a value strtobool rejects.
    """
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"""If set, {key} must be yes or no.""")
    return _value
snake_case_ = parse_flag_from_env('RUN_SLOW', default=False)
def skip(test_case):
    """Decorator that unconditionally skips *test_case*."""
    return unittest.skip('''Test was skipped''')(test_case)
def slow(test_case):
    """Decorator marking a test as slow; run only when RUN_SLOW=yes is set."""
    return unittest.skipUnless(_run_slow_tests, '''test is slow''')(test_case)
def require_cpu(test_case):
    """Decorator skipping the test when a CUDA device is available (CPU-only tests)."""
    return unittest.skipUnless(not torch.cuda.is_available(), '''test requires only a CPU''')(test_case)
def require_cuda(test_case):
    """Decorator skipping the test unless a CUDA device is available."""
    return unittest.skipUnless(torch.cuda.is_available(), '''test requires a GPU''')(test_case)
def require_xpu(test_case):
    """Decorator skipping the test unless an Intel XPU is available."""
    return unittest.skipUnless(is_xpu_available(), '''test requires a XPU''')(test_case)
def require_mps(test_case):
    """Decorator skipping the test unless torch has a usable `mps` backend."""
    return unittest.skipUnless(is_mps_available(), '''test requires a `mps` backend support in `torch`''')(test_case)
def require_huggingface_suite(test_case):
    """Decorator skipping the test unless both transformers and datasets are installed."""
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), '''test requires the Hugging Face suite''')(test_case)
def require_bnb(test_case):
    """Decorator skipping the test unless bitsandbytes is installed."""
    return unittest.skipUnless(is_bnb_available(), '''test requires the bitsandbytes library''')(test_case)
def require_tpu(test_case):
    """Decorator skipping the test unless a TPU is available."""
    return unittest.skipUnless(is_tpu_available(), '''test requires TPU''')(test_case)
def require_single_gpu(test_case):
    """Decorator skipping the test unless exactly one CUDA device is visible."""
    return unittest.skipUnless(torch.cuda.device_count() == 1, '''test requires a GPU''')(test_case)
def require_single_xpu(test_case):
    """Decorator skipping the test unless exactly one XPU device is visible."""
    return unittest.skipUnless(torch.xpu.device_count() == 1, '''test requires a XPU''')(test_case)
def require_multi_gpu(test_case):
    """Decorator skipping the test unless more than one CUDA device is visible."""
    return unittest.skipUnless(torch.cuda.device_count() > 1, '''test requires multiple GPUs''')(test_case)
def require_multi_xpu(test_case):
    """Decorator skipping the test unless more than one XPU device is visible."""
    return unittest.skipUnless(torch.xpu.device_count() > 1, '''test requires multiple XPUs''')(test_case)
def require_safetensors(test_case):
    """Decorator skipping the test unless safetensors is installed."""
    return unittest.skipUnless(is_safetensors_available(), '''test requires safetensors''')(test_case)
def require_deepspeed(test_case):
    """Decorator skipping the test unless DeepSpeed is installed."""
    return unittest.skipUnless(is_deepspeed_available(), '''test requires DeepSpeed''')(test_case)
def require_fsdp(test_case):
    """Decorator skipping the test unless torch >= 1.12.0 (minimum for FSDP support)."""
    return unittest.skipUnless(is_torch_version('''>=''', '''1.12.0'''), '''test requires torch version >= 1.12.0''')(test_case)
def require_torch_min_version(test_case=None, version=None):
    """Decorator skipping the test unless torch >= *version*.

    Usable parameterized: ``@require_torch_min_version(version="1.12")`` — when
    called without a test case it returns a partially applied decorator.
    """
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version('''>=''', version), f"""test requires torch version >= {version}""")(test_case)
def require_tensorboard(test_case):
    """Decorator skipping the test unless tensorboard is installed."""
    return unittest.skipUnless(is_tensorboard_available(), '''test requires Tensorboard''')(test_case)
def require_wandb(test_case):
    """Decorator skipping the test unless wandb is installed."""
    return unittest.skipUnless(is_wandb_available(), '''test requires wandb''')(test_case)
def require_comet_ml(test_case):
    """Decorator skipping the test unless comet_ml is installed."""
    return unittest.skipUnless(is_comet_ml_available(), '''test requires comet_ml''')(test_case)
# True when at least one tracker (wandb/tensorboard) is usable and comet_ml
# is absent; `require_trackers` below keys off this flag.
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def require_trackers(test_case):
    """Decorator skipping the test unless a tracker is available and comet_ml is not installed."""
    return unittest.skipUnless(
        _atleast_one_tracker_available, '''test requires at least one tracker to be available and for `comet_ml` to not be installed''', )(test_case)
class TempDirTestCase(unittest.TestCase):
    """TestCase with a class-wide temporary directory (``cls.tmpdir``).

    The directory is created once per class, emptied before each test while
    ``clear_on_setup`` is True, and removed when the class is torn down.
    """

    # subclasses may set this to False to keep files across tests
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        """Create the shared temporary directory once for the whole class."""
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        """Remove the temporary directory after all tests in the class ran."""
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        """Empty the temporary directory before each test."""
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob('''**/*'''):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class AccelerateTestCase(unittest.TestCase):
    """TestCase that resets the accelerate state singletons after every test,
    so distributed state does not leak between tests."""

    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class MockingTestCase(unittest.TestCase):
    """TestCase that starts the given mock patchers in setUp and registers
    their stop as cleanup automatically."""

    def setUp(self, mocks: Union[mock.Mock, List[mock.Mock]] = None):
        # accept a single mock or a list/tuple of mocks
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    """Return True when *tensor* holds the same values on every distributed process.

    Gathers the tensor across processes and compares each gathered copy to the
    local one.
    """
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class SCREAMING_SNAKE_CASE__ :
def __init__(self : Tuple , a__ : int , a__ : Dict , a__ : Optional[int] ):
"""simple docstring"""
__snake_case = returncode
__snake_case = stdout
__snake_case = stderr
async def lowerCamelCase__ ( snake_case_ : Tuple , snake_case_ : Tuple ) -> Optional[Any]:
while True:
__snake_case = await stream.readline()
if line:
callback(snake_case_ )
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    """Run *cmd* asynchronously, teeing its stdout/stderr to ours while capturing them.

    Returns a _RunOutput with the return code and the captured (decoded) lines.
    """
    if echo:
        print('''\nRunning: ''', ''' '''.join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        # decode, capture, and optionally echo each line as it arrives
        line = line.decode('''utf-8''').rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label='''stdout:'''))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label='''stderr:'''))),
        ], timeout=timeout, )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    """Synchronously run *cmd* through _stream_subprocess.

    Raises RuntimeError (including the workers' combined stderr) when the
    command exits with a non-zero return code.
    """
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))

    cmd_str = ''' '''.join(cmd)
    if result.returncode > 0:
        stderr = '''\n'''.join(result.stderr)
        raise RuntimeError(
            f"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
            f"""The combined stderr from workers follows:\n{stderr}""")

    return result
class SubprocessCallException(Exception):
    """Raised by run_command when the spawned command exits with an error."""
def run_command(command, return_stdout=False):
    """Run *command* (a list) capturing stdout+stderr.

    Returns the decoded output when *return_stdout* is True (None otherwise);
    raises SubprocessCallException with the captured output on failure.
    """
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, '''decode'''):
                output = output.decode('''utf-8''')
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"""Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}""") from e
| 24 |
"""simple docstring"""
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the two sorted runs ``input_list[low:mid]`` and
    ``input_list[mid:high + 1]`` back into ``input_list`` in place; return it.

    >>> merge([2, 1, 4, 3], 0, 1, 1)
    [1, 2, 4, 3]
    """
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        # pop the smaller head element of the two runs
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list
def iter_merge_sort(input_list: list) -> list:
    """Return a sorted copy of *input_list* using bottom-up (iterative) merge sort.

    The run width *p* doubles each pass; `merge` stitches adjacent runs together.
    """
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list
if __name__ == "__main__":
    # Read a comma-separated list of integers and print it sorted.
    user_input = input('Enter numbers separated by a comma:\n').strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(',')]
    print(iter_merge_sort(unsorted))
"""simple docstring"""
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """
    Descriptor that mimics @property but caches the computed value on the instance.

    The value is stored under ``__cached_<fget name>`` after the first access;
    subsequent accesses return the cached value without calling fget again.
    NOTE(review): a fget legitimately returning None is recomputed on every
    access, since None is also the cache-miss sentinel.
    """

    def __get__(self, obj, objtype=None):
        # Accessed on the class itself: return the descriptor, like property does.
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("""unreadable attribute""")
        attr = """__cached_""" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def strtobool(val):
    """Convert a string representation of truth to 1 (true) or 0 (false).

    True values are y, yes, t, true, on and 1; false values are n, no, f,
    false, off and 0. Raises ValueError for anything else.
    """
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f'''invalid truth value {val!r}''')
def is_tensor(x):
    """Test whether *x* is a torch/TF tensor, a jax array or tracer, or a numpy array.

    Framework imports happen lazily and only when the respective availability
    check passes, so optional backends stay optional.
    """
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True

    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True

    return isinstance(x, np.ndarray)
def lowercase_ ( _snake_case ):
return isinstance(_snake_case ,np.ndarray )
def is_numpy_array(x):
    """Tests if `x` is a numpy array or not."""
    return _is_numpy(x)
def lowercase_ ( _snake_case ):
import torch
return isinstance(_snake_case ,torch.Tensor )
def is_torch_tensor(x):
    """Tests if `x` is a torch tensor; safe to call when torch is not installed."""
    return False if not is_torch_available() else _is_torch(x)
def lowercase_ ( _snake_case ):
import torch
return isinstance(_snake_case ,torch.device )
def is_torch_device(x):
    """Tests if `x` is a torch device; safe to call when torch is not installed."""
    return False if not is_torch_available() else _is_torch_device(x)
def lowercase_ ( _snake_case ):
import torch
if isinstance(_snake_case ,_snake_case ):
if hasattr(_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Tuple = getattr(_snake_case ,_snake_case )
else:
return False
return isinstance(_snake_case ,torch.dtype )
def is_torch_dtype(x):
    """Tests if `x` is a torch dtype; safe to call when torch is not installed."""
    return False if not is_torch_available() else _is_torch_dtype(x)
def _is_tensorflow(x):
    """True when *x* is a tf.Tensor; tensorflow is imported lazily."""
    import tensorflow as tf

    return isinstance(x, tf.Tensor)
def is_tf_tensor(x):
    """Tests if `x` is a tensorflow tensor; safe to call when TF is not installed."""
    return False if not is_tf_available() else _is_tensorflow(x)
def _is_tf_symbolic_tensor(x):
    """True when *x* is a symbolic (graph-mode) tensorflow tensor."""
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, """is_symbolic_tensor"""):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor
def is_tf_symbolic_tensor(x):
    """Tests if `x` is a symbolic TF tensor; safe to call when TF is not installed."""
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)
def lowercase_ ( _snake_case ):
import jax.numpy as jnp # noqa: F811
return isinstance(_snake_case ,jnp.ndarray )
def is_jax_tensor(x):
    """Tests if `x` is a jax tensor; safe to call when jax is not installed."""
    return False if not is_flax_available() else _is_jax(x)
def to_py_obj(obj):
    """Convert TF/PyTorch/JAX tensors, numpy arrays, and nested lists/dicts of
    them, to plain python lists/numbers (recursively)."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj
def to_numpy(obj):
    """Convert TF/PyTorch/JAX tensors, numpy arrays, and nested lists/dicts of
    them, to numpy arrays (dicts are converted value-wise)."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = fields(self )
# Safety and consistency checks
if not len(SCREAMING_SNAKE_CASE__ ):
raise ValueError(F'''{self.__class__.__name__} has no fields.''' )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(F'''{self.__class__.__name__} should not have more than one required field.''' )
SCREAMING_SNAKE_CASE__ : List[Any] = getattr(self , class_fields[0].name )
SCREAMING_SNAKE_CASE__ : Dict = all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(SCREAMING_SNAKE_CASE__ ):
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = first_field.items()
SCREAMING_SNAKE_CASE__ : Any = True
else:
try:
SCREAMING_SNAKE_CASE__ : List[Any] = iter(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = True
except TypeError:
SCREAMING_SNAKE_CASE__ : List[str] = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(SCREAMING_SNAKE_CASE__ ):
if (
not isinstance(SCREAMING_SNAKE_CASE__ , (list, tuple) )
or not len(SCREAMING_SNAKE_CASE__ ) == 2
or not isinstance(element[0] , SCREAMING_SNAKE_CASE__ )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
SCREAMING_SNAKE_CASE__ : Dict = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
F'''Cannot set key/value for {element}. It needs to be a tuple (key, value).''' )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = element[1]
elif first_field is not None:
SCREAMING_SNAKE_CASE__ : Any = first_field
else:
for field in class_fields:
SCREAMING_SNAKE_CASE__ : Tuple = getattr(self , field.name )
if v is not None:
SCREAMING_SNAKE_CASE__ : Dict = v
def __delitem__(self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
"""simple docstring"""
raise Exception(F'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' )
def __magic_name__ (self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> List[Any]:
"""simple docstring"""
raise Exception(F'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' )
def __magic_name__ (self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
raise Exception(F'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' )
def __magic_name__ (self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
"""simple docstring"""
raise Exception(F'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' )
def __getitem__(self , SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : List[Any] = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Any:
"""simple docstring"""
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
super().__setattr__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __setitem__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
super().__setitem__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Tuple[Any]:
"""simple docstring"""
return tuple(self[k] for k in self.keys() )
class lowerCAmelCase_ (a__ , a__ ):
    """Enum base that raises an informative ValueError for unknown values."""

    # NOTE(review): both bases are mangled to `a__` (undefined here); presumably
    # (str, Enum) as in the usual ExplicitEnum pattern -- confirm upstream.
    @classmethod
    def _missing_(cls, value):
        # Fix: `_missing_` is the Enum hook for unknown values, and
        # `_valueamember_map_` was digit-mangled from `_value2member_map_`
        # (the same `2 -> a` pattern seen elsewhere in this file).
        raise ValueError(
            F'''{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys() )}''')
class lowerCAmelCase_ (a__ ):
    """Padding strategies for tokenizer calls.

    Fix: the original bound all three values to one reused mangled attribute
    name, which is invalid for an Enum (reused key) and would otherwise keep
    only the last value; canonical member names are restored from the values.
    """

    LONGEST = '''longest'''
    MAX_LENGTH = '''max_length'''
    DO_NOT_PAD = '''do_not_pad'''
class lowerCAmelCase_ (a__ ):
    """Supported tensor frameworks (return_tensors values).

    Fix: the original bound all four values to one reused mangled attribute
    name (invalid for an Enum); canonical member names restored from the values.
    """

    PYTORCH = '''pt'''
    TENSORFLOW = '''tf'''
    NUMPY = '''np'''
    JAX = '''jax'''
class lowerCAmelCase_ :
    """Enter/exit a whole list of context managers as a single `with` target."""

    def __init__(self, context_managers):
        # Fix: the original bound mangled locals instead of `self.*`, so
        # `__enter__`/`__exit__` failed on missing attributes.
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        # Enter every wrapped manager; exits are delegated to the ExitStack.
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
def lowercase_(model_class):
    """Return True if `model_class`'s forward/call signature defaults `return_loss=True`."""
    # NOTE(review): `infer_framework` is itself name-mangled in this module
    # (defined below as another `lowercase_`); confirm the symbol resolves at runtime.
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def lowercase_(model_class):
    """List the label-argument names accepted by `model_class`'s signature."""
    # Fix: the original assigned mangled locals while reading `model_name`,
    # `framework` and `signature` (NameError).
    model_name = model_class.__name__
    # NOTE(review): `infer_framework` is name-mangled in this module; confirm it resolves.
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models
    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def lowercase_(d, parent_key="", delimiter="."):
    """Flatten a nested dict, joining key paths with `delimiter`.

    Fix: both the outer and inner functions repeated one mangled parameter
    name (SyntaxError); names restored from the body's reads.
    """

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            # NOTE(review): nesting test restored as `dict`; the mangled original hid
            # the real second isinstance argument -- confirm whether MutableMapping was intended.
            if v and isinstance(v, dict):
                yield from lowercase_(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
@contextmanager
def lowercase_(working_dir, use_temp_dir=False):
    """Yield a fresh temporary directory when `use_temp_dir`, else `working_dir`.

    Fix: both parameters shared one mangled name (SyntaxError); restored from
    the body's reads.
    """
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def lowercase_(array, axes=None):
    """Framework-agnostic transpose for numpy / torch / tf / jax tensors.

    Fix: both parameters shared one mangled name (SyntaxError) while the body
    read `array` and `axes`.
    """
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f'''Type not supported for transpose: {type(array)}.''')
def lowercase_(array, newshape):
    """Framework-agnostic reshape for numpy / torch / tf / jax tensors.

    Fix: duplicate mangled parameter names (SyntaxError) restored from the body.
    """
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f'''Type not supported for reshape: {type(array)}.''')
def lowercase_(array, axis=None):
    """Framework-agnostic squeeze for numpy / torch / tf / jax tensors.

    Fix: duplicate mangled parameter names (SyntaxError) restored from the body.
    """
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f'''Type not supported for squeeze: {type(array)}.''')
def lowercase_(array, axis):
    """Framework-agnostic expand_dims for numpy / torch / tf / jax tensors.

    Fix: duplicate mangled parameter names (SyntaxError) restored from the body.
    """
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f'''Type not supported for expand_dims: {type(array)}.''')
def lowercase_(array):
    """Total number of elements in a numpy / torch / tf / jax tensor."""
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        # Fixed copy-pasted message: this helper computes the size, not expand_dims.
        raise ValueError(f'''Type not supported for tensor_size: {type(array)}.''')
def lowercase_(auto_map, repo_id):
    """Prefix every entry of `auto_map` with `repo_id--` unless already namespaced.

    Fix: both parameters shared one mangled name (SyntaxError) and the updated
    values were bound to throwaway locals instead of written back into the map.
    """
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f'''{repo_id}--{v}''' if (v is not None and """--""" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f'''{repo_id}--{value}'''
    return auto_map
def lowercase_(model_class):
    """Infer "tf" / "pt" / "flax" from a model class's MRO modules and names.

    Fix: the original bound `module`/`name` to throwaway mangled locals and
    then read the real names (NameError).
    """
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("""tensorflow""") or module.startswith("""keras""") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("""torch""") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("""flax""") or module.startswith("""jax""") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        # for/else: only reached when no base class matched any framework.
        raise TypeError(f'''Could not infer framework from class {model_class}.''')
# | 25 |  (stray table residue from extraction; neutralized as a comment)
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class A__ ( unittest.TestCase ):
    """Flax MT5 integration check: expected cross-entropy score on a tiny example."""

    @slow
    def test_small_integration(self) -> None:
        # NOTE(review): the method name was mangled; a `test_`-prefixed name is
        # required for unittest discovery -- confirm the upstream name.
        # Fix: every local below was bound to a throwaway mangled name while
        # later lines read `model`, `tokenizer`, `labels`, etc. (NameError).
        model = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids
        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)
        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        # Stray "| 269 | 0 |" table residue removed from the end of this line.
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
def lowerCAmelCase_(first_str, second_str):
    """Interleave the characters of two strings, keeping leftovers of the longer one.

    Fix: both parameters shared one mangled name (SyntaxError) while the body
    read `first_str` / `second_str`; locals restored to the names the body uses.
    """
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    # Iterate up to the longer length so the tail of the longer string is kept.
    longest_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(longest_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
print(alternative_string_arrange("AB", "XYZ"), end=" ")
# | 26 |  (stray table residue from extraction; neutralized as a comment)
"""simple docstring"""
import re
def _lowercase(dna: str) -> str:
    """Return the complementary DNA strand (A<->T, C<->G).

    Fix: the body read `dna` but the parameter was mangled to `__snake_case`,
    so the function raised NameError on the return line.

    Raises:
        ValueError: if the strand contains characters outside A/T/C/G.
    """
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(dna.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod() | 269 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger for this configuration file.
__lowercase : int = logging.get_logger(__name__)
# Checkpoint shortcut name -> hosted config.json URL on the Hugging Face Hub.
# NOTE(review): both constants share one mangled name (`__lowercase`), so the
# second assignment shadows the logger -- later code referencing the original
# distinct names will not resolve.
__lowercase : Union[str, Any] = {
    'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
    'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
    'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class __UpperCamelCase ( PretrainedConfig ):
    """BigBird model configuration (hyper-parameters and special token ids).

    Fixes vs. the mangled original: the base class `lowerCAmelCase_` was
    undefined (restored to the imported `PretrainedConfig`); every `__init__`
    parameter shared one name `__a` (SyntaxError) -- names restored from the
    attributes the body stores; and every assignment bound a throwaway local
    instead of `self.<attr>`.
    """

    # NOTE(review): mangled from the conventional `model_type` class attribute.
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=5_0358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-1_2,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class BigBirdOnnxConfig ( OnnxConfig ):
    """ONNX export config: declares dynamic axes for input_ids / attention_mask.

    Fixes: renamed from a second `__UpperCamelCase` that shadowed the config
    class above (NOTE(review): the upstream name is presumably BigBirdOnnxConfig
    -- confirm); base restored to the imported `OnnxConfig`.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # `inputs` is the OnnxConfig property this override implements; the
        # mangled method bound a throwaway local where `dynamic_axis` was read.
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )
# | 27 |  (stray table residue from extraction; neutralized as a comment)
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A__ ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenizer test-suite for Funnel: plugs FunnelTokenizer(Fast) into the common mixin.

    Fixes vs. the mangled original: the mixin base `__SCREAMING_SNAKE_CASE` was
    undefined (restored to the imported `TokenizerTesterMixin`); the four class
    attributes all reused one name; method names were mangled (breaking both
    mixin hooks and unittest discovery); and locals were bound to throwaway
    names while later lines read the real ones.
    """

    # Attribute names restored to the TokenizerTesterMixin contract.
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        # Name restored: the body calls super().setUp().
        super().setUp()
        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        # NOTE(review): do_lower_case argument was mangled; False matches the
        # un-lowercased expectations below -- confirm upstream.
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)
            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
# NOTE(review): every constant below was mangled to the same name
# `_lowerCamelCase`, so each assignment shadows the previous one; the
# docstring decorators later in this file reference the original distinct
# names (_CONFIG_FOR_DOC, _CHECKPOINT_FOR_DOC, ...), which will not resolve.
_lowerCamelCase : Dict = logging.get_logger(__name__)
# General docstring
_lowerCamelCase : List[str] = "ResNetConfig"
# Base docstring
_lowerCamelCase : Tuple = "microsoft/resnet-50"
_lowerCamelCase : Union[str, Any] = [1, 2048, 7, 7]
# Image classification docstring
_lowerCamelCase : Dict = "microsoft/resnet-50"
_lowerCamelCase : Optional[int] = "tiger cat"
# List of supported checkpoints for this architecture.
_lowerCamelCase : str = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer ( nn.Module ):
    """Conv2d -> BatchNorm2d -> activation, the basic ResNet building block.

    Fixes: class name restored (later code in this file references
    `ResNetConvLayer`); `nn.Convad`/`nn.BatchNormad` were digit-mangled from
    `nn.Conv2d`/`nn.BatchNorm2d`; all `__init__` parameters shared one name
    (SyntaxError); submodules were bound to a throwaway local instead of
    `self.*`; and the forward hook was mis-named `A`, which nn.Module's
    `__call__` never invokes.
    """

    def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"):
        super().__init__()
        # NOTE(review): bias argument was mangled; bias=False matches the
        # conv-before-batchnorm convention -- confirm upstream.
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACTaFN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state: Tensor) -> Tensor:
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetEmbeddings ( nn.Module ):
    """ResNet stem: 7x7 strided convolution followed by 3x3 max-pooling.

    Fixes: class name restored (referenced later in this file);
    `nn.MaxPoolad` digit-mangled from `nn.MaxPool2d`; submodules bound to a
    throwaway local instead of `self.*`; forward hook mis-named `A`.
    """

    def __init__(self, config: "ResNetConfig"):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut ( nn.Module ):
    """1x1 strided convolution + batch norm to project residuals to the right shape.

    Fixes: class name restored (referenced by the residual layers below);
    digit-mangled `nn.Convad`/`nn.BatchNormad` restored; `self.*` assignments
    restored; forward hook mis-named `A`.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        # NOTE(review): bias argument was mangled; bias=False matches the
        # conv-before-batchnorm convention -- confirm upstream.
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer ( nn.Module ):
    """Classic residual layer: two 3x3 convs plus an optional projection shortcut.

    Fixes: class name restored (ResNetStage selects it by name); `self.*`
    assignments restored; duplicate mangled `__init__` parameters renamed;
    forward hook mis-named `A`.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        # Project the residual only when the shapes would otherwise disagree.
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        # NOTE(review): the second conv's activation argument was mangled;
        # activation=None (apply the nonlinearity after the residual add)
        # matches the structure below -- confirm upstream.
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACTaFN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetBottleNeckLayer ( nn.Module ):
    """Bottleneck residual layer: 1x1 reduce -> 3x3 -> 1x1 expand, plus shortcut.

    Fixes: class name restored (ResNetStage selects it by name); `self.*`
    assignments restored; duplicate mangled `__init__` parameters renamed;
    forward hook mis-named `A`.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        # The inner convolutions operate on a channel count reduced by `reduction`.
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        # NOTE(review): the last conv's activation argument was mangled;
        # activation=None matches the post-residual activation below -- confirm upstream.
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACTaFN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetStage ( nn.Module ):
    """One ResNet stage: `depth` residual layers, downsampling in the first.

    Fixes: class name restored (ResNetEncoder references it); the layer list
    was bound to a throwaway local while `forward` iterates `self.layers`;
    duplicate mangled `__init__` parameters renamed; forward hook mis-named `A`.
    """

    def __init__(self, config: "ResNetConfig", in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder ( nn.Module ):
    """Stack of ResNet stages; optionally collects per-stage hidden states.

    Fixes: class name restored (the model classes below reference it);
    `self.stages` restored (bound to a throwaway local); locals in `forward`
    restored to the names the body reads; forward hook mis-named `A`.
    """

    def __init__(self, config: "ResNetConfig"):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state, hidden_states=hidden_states,
        )
class ResNetPreTrainedModel ( PreTrainedModel ):
    """Base class wiring ResNet into the PreTrainedModel machinery.

    Fixes: class name restored; the undefined mangled base `_a` restored to the
    imported `PreTrainedModel`; the four class attributes all reused one
    mangled name (only the last survived) -- restored to the PreTrainedModel
    contract names their values ground; hook methods renamed from `A`.
    """

    config_class = ResNetConfig
    base_model_prefix = """resnet"""
    main_input_name = """pixel_values"""
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        # Kaiming init for convolutions; unit-scale/zero-shift for norm layers.
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            # NOTE(review): the mangled assignment target was lost; setting the
            # flag on the encoder module matches the isinstance guard -- confirm upstream.
            module.gradient_checkpointing = value
_lowerCamelCase : Union[str, Any] = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_lowerCamelCase : Any = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    """The bare ResNet model outputting raw features without any specific head on top.""" , _a , )
class ResNetModel ( ResNetPreTrainedModel ):
    """Bare ResNet: embeddings -> encoder -> adaptive average pooling.

    Fixes: class name and base restored; `nn.AdaptiveAvgPoolad` digit-mangled
    from `nn.AdaptiveAvgPool2d`; `self.*` assignments and forward locals
    restored; forward hook mis-named `A`.
    NOTE(review): decorator arguments such as `_a`, `UpperCamelCase__` and
    `_CHECKPOINT_FOR_DOC` reference mangled/shadowed module constants and are
    left as in the original -- they cannot be grounded from this file.
    """

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UpperCamelCase__)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict)
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, )
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """ , _a , )
class ResNetForImageClassification ( ResNetPreTrainedModel ):
    """ResNet with a linear classification head and standard HF loss selection.

    Fixes: class name and base restored; `self.*` assignments and forward
    locals restored (the originals bound a throwaway mangled name); the
    problem_type values were assigned to that throwaway name instead of
    `self.config.problem_type`, which later lines read; forward hook
    mis-named `A`.
    NOTE(review): decorator arguments (`_a`, `UpperCamelCase__`,
    `_IMAGE_CLASS_CHECKPOINT`, ...) reference mangled/shadowed module
    constants and are left as in the original.
    """

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UpperCamelCase__)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            # Infer the problem type once from num_labels / label dtype.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """ , _a , )
class ResNetBackbone ( ResNetPreTrainedModel , BackboneMixin ):
    """ResNet feature-pyramid backbone returning the requested stage outputs.

    Fixes: class name restored; the undefined mangled bases `_a , _a` restored
    to `ResNetPreTrainedModel, BackboneMixin` (grounded by `_init_backbone`
    and `BackboneOutput`); `self.*` assignments and forward locals restored;
    the encoder call's mangled flags restored to `True, True` (the code reads
    `outputs.hidden_states` unconditionally); forward hook mis-named `A`.
    NOTE(review): decorator arguments reference mangled/shadowed module
    constants and are left as in the original.
    """

    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)
        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UpperCamelCase__)
    @replace_return_docstrings(output_type=BackboneOutput , config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        embedding_output = self.embedder(pixel_values)
        # Always request hidden states: the stage outputs below are read from them.
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)
        hidden_states = outputs.hidden_states
        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=feature_maps, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None, )
# | 28 |  (stray table residue from extraction; neutralized as a comment)
"""simple docstring"""
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def _lowercase ( __snake_case = "laptop" ) -> DataFrame:
    """Scrape Amazon.in search results for product `__snake_case` into a DataFrame.

    NOTE(review): obfuscation damage in this function —
    - `product` below is undefined (the parameter is `__snake_case`);
    - the built URL and headers are bound to throwaway locals and never used
      (`requests.get` is handed the bare search term as its URL);
    - the per-item row list is never inserted into `data_frame`, so the frame
      stays empty;
    - `item.ha` is presumably a mangled `item.h2` — TODO confirm against the
      upstream scraper.
    """
    __lowerCAmelCase : str = F"""https://www.amazon.in/laptop/s?k={product}"""
    __lowerCAmelCase : Union[str, Any] = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    # NOTE(review): `headers=__snake_case` passes the search term, not the
    # header dict built above.
    __lowerCAmelCase : List[str] = BeautifulSoup(requests.get(__snake_case ,headers=__snake_case ).text )
    # Initialize a Pandas dataframe with the column titles
    __lowerCAmelCase : Dict = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ] )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div" ,attrs={"class": "s-result-item", "data-component-type": "s-search-result"} ,) ,soup.find_all("div" ,attrs={"class": "a-row a-size-base a-color-base"} ) ,):
        try:
            __lowerCAmelCase : Any = item.ha.text
            __lowerCAmelCase : Union[str, Any] = "https://www.amazon.in/" + item.ha.a["href"]
            __lowerCAmelCase : Any = item.find("span" ,attrs={"class": "a-offscreen"} ).text
            try:
                __lowerCAmelCase : Union[str, Any] = item.find("span" ,attrs={"class": "a-icon-alt"} ).text
            except AttributeError:
                __lowerCAmelCase : Optional[Any] = "Not available"
            try:
                __lowerCAmelCase : Union[str, Any] = (
                    "₹"
                    + item.find(
                        "span" ,attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
                )
            except AttributeError:
                __lowerCAmelCase : Dict = ""
            try:
                # Discount percentage: (MRP - price) / MRP * 100.
                __lowerCAmelCase : str = float(
                    (
                        (
                            float(product_mrp.strip("₹" ).replace("," ,"" ) )
                            - float(product_price.strip("₹" ).replace("," ,"" ) )
                        )
                        / float(product_mrp.strip("₹" ).replace("," ,"" ) )
                    )
                    * 100 )
            except ValueError:
                __lowerCAmelCase : List[str] = float("nan" )
        except AttributeError:
            pass
        # NOTE(review): this row was presumably assigned to
        # `data_frame.loc[len(data_frame.index)]`; as written it is discarded.
        __lowerCAmelCase : int = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        __lowerCAmelCase : Union[str, Any] = " "
        __lowerCAmelCase : Union[str, Any] = " "
        data_frame.index += 1
    return data_frame
if __name__ == "__main__":
    # Demo entry point: scrape one product and dump the results to CSV.
    __snake_case : Any = 'headphones'
    # Fix: the scraper defined above is `_lowercase` and the product variable is
    # `__snake_case`; the previous code called the undefined names
    # `get_amazon_product_data` and `product` (NameError at runtime).
    _lowercase(__snake_case).to_csv(F"""Amazon Product Data for {__snake_case}.csv""")
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    """Builds a small random config plus inputs for DecisionTransformerModel tests.

    Fixes over the previous (machine-mangled) version:
    - `__init__` repeated one parameter name (a SyntaxError) and bound every
      value to a throwaway local, while the other methods read
      `self.batch_size`, `self.seq_length`, etc.
    - All four helpers were defined under one mangled name so only the last
      survived; the test class below calls `prepare_config_and_inputs`,
      `create_and_check_model` and `prepare_config_and_inputs_for_common`, and
      instantiates `DecisionTransformerModelTester`, which pins the names down.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        """Return a config plus random tensors shaped like real model inputs."""
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1_0_0_0)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))
        config = self.get_config()
        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config(self):
        """Build a DecisionTransformerConfig mirroring the tester's dimensions."""
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )

    def create_and_check_model(
        self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask
    ):
        """Instantiate the model, run one forward pass and check the output shapes."""
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)
        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length *3 as there are 3 modalities: states, returns and actions

    def prepare_config_and_inputs_for_common(self):
        """Adapt `prepare_config_and_inputs` output to the (config, inputs_dict) protocol."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class lowerCamelCase (_snake_case , _snake_case , _snake_case , unittest.TestCase ):
    """Common-suite tests for DecisionTransformerModel.

    NOTE(review): mangled block — the three `_snake_case` bases are undefined at
    class-creation time (presumably ModelTesterMixin, GenerationTesterMixin and
    PipelineTesterMixin, imported at the top of this chunk), every flag below is
    assigned to the *same* `_snake_case` attribute so only the last assignment
    survives, and all test methods share one mangled name so only the last is
    registered. Restore the original identifiers before relying on this suite.
    """
    _snake_case : int = (DecisionTransformerModel,) if is_torch_available() else ()
    _snake_case : Tuple = ()
    _snake_case : Any = {'''feature-extraction''': DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    _snake_case : List[Any] = False

    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    _snake_case : Union[str, Any] = False
    _snake_case : Optional[int] = False
    _snake_case : List[str] = False
    _snake_case : Optional[int] = False
    _snake_case : List[str] = False
    _snake_case : int = False
    _snake_case : int = False
    _snake_case : List[str] = False
    _snake_case : List[Any] = False

    # setUp: build the model tester and a ConfigTester for common config checks.
    def __UpperCAmelCase ( self ) -> Tuple:
        UpperCAmelCase_ : str = DecisionTransformerModelTester(self )
        UpperCAmelCase_ : str = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=3_7 )

    # Run the shared PretrainedConfig sanity checks.
    def __UpperCAmelCase ( self ) -> List[str]:
        self.config_tester.run_common_tests()

    # Forward-pass shape test via the model tester.
    def __UpperCAmelCase ( self ) -> Any:
        UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_UpperCamelCase )

    # Smoke-test loading the first published checkpoint.
    @slow
    def __UpperCAmelCase ( self ) -> List[Any]:
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase_ : int = DecisionTransformerModel.from_pretrained(_UpperCamelCase )
            self.assertIsNotNone(_UpperCamelCase )

    # Check that forward() declares the expected argument names, in order.
    def __UpperCAmelCase ( self ) -> Optional[Any]:
        UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase_ : List[Any] = model_class(_UpperCamelCase )
            UpperCAmelCase_ : Optional[int] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCAmelCase_ : Union[str, Any] = [*signature.parameters.keys()]
            UpperCAmelCase_ : List[str] = [
                'states',
                'actions',
                'rewards',
                'returns_to_go',
                'timesteps',
                'attention_mask',
            ]
            self.assertListEqual(arg_names[: len(_UpperCamelCase )] , _UpperCamelCase )
@require_torch
class lowerCamelCase (unittest.TestCase ):
    """Autoregressive-rollout integration test for a pretrained Decision Transformer.

    Intended flow per step: pad the action/reward buffers, run the model,
    compare the predicted action against recorded expected outputs, fake an
    environment step, then append the new state / return-to-go / timestep.

    NOTE(review): mangled block — every intermediate is bound to placeholder
    names, so later reads (`model`, `config`, `state`, `target_return`,
    `expected_outputs`, `action_pred`, `reward`, `pred_return`, ...) reference
    undefined names, and `torch.floataa` is presumably a mangled
    `torch.float32`. Restore the original bindings before running this test.
    """

    @slow
    def __UpperCAmelCase ( self ) -> int:
        UpperCAmelCase_ : Optional[Any] = 2 # number of steps of autoregressive prediction we will perform
        UpperCAmelCase_ : Optional[Any] = 1_0 # defined by the RL environment, may be normalized
        UpperCAmelCase_ : Tuple = DecisionTransformerModel.from_pretrained('edbeeching/decision-transformer-gym-hopper-expert' )
        UpperCAmelCase_ : int = model.to(_UpperCamelCase )
        UpperCAmelCase_ : int = model.config
        torch.manual_seed(0 )
        UpperCAmelCase_ : str = torch.randn(1 , 1 , config.state_dim ).to(device=_UpperCamelCase , dtype=torch.floataa ) # env.reset()
        # Recorded expected action predictions for each rollout step.
        UpperCAmelCase_ : Tuple = torch.tensor(
            [[0.24_27_93, -0.28_69_30_74, 0.8_74_26_13], [0.67_81_52_74, -0.08_10_10_85, -0.12_95_21_47]] , device=_UpperCamelCase )
        UpperCAmelCase_ : Tuple = torch.tensor(_UpperCamelCase , device=_UpperCamelCase , dtype=torch.floataa ).reshape(1 , 1 , 1 )
        UpperCAmelCase_ : Optional[int] = state
        # Empty action/reward buffers plus the initial timestep index.
        UpperCAmelCase_ : str = torch.zeros(1 , 0 , config.act_dim , device=_UpperCamelCase , dtype=torch.floataa )
        UpperCAmelCase_ : List[Any] = torch.zeros(1 , 0 , device=_UpperCamelCase , dtype=torch.floataa )
        UpperCAmelCase_ : Optional[int] = torch.tensor(0 , device=_UpperCamelCase , dtype=torch.long ).reshape(1 , 1 )
        for step in range(_UpperCamelCase ):
            # Pad the buffers with a zero slot for the action/reward to be predicted.
            UpperCAmelCase_ : Union[str, Any] = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=_UpperCamelCase )] , dim=1 )
            UpperCAmelCase_ : Tuple = torch.cat([rewards, torch.zeros(1 , 1 , device=_UpperCamelCase )] , dim=1 )
            UpperCAmelCase_ : List[str] = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
            with torch.no_grad():
                UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = model(
                    states=_UpperCamelCase , actions=_UpperCamelCase , rewards=_UpperCamelCase , returns_to_go=_UpperCamelCase , timesteps=_UpperCamelCase , attention_mask=_UpperCamelCase , return_dict=_UpperCamelCase , )
            self.assertEqual(action_pred.shape , actions.shape )
            self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1E-4 ) )
            # Fake environment transition (random next state, constant reward).
            UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = ( # env.step(action)
                torch.randn(1 , 1 , config.state_dim ).to(device=_UpperCamelCase , dtype=torch.floataa ),
                1.0,
                False,
                {},
            )
            UpperCAmelCase_ : List[str] = action_pred[0, -1]
            UpperCAmelCase_ : Tuple = torch.cat([states, state] , dim=1 )
            UpperCAmelCase_ : Tuple = returns_to_go[0, -1] - reward
            UpperCAmelCase_ : Optional[Any] = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
            UpperCAmelCase_ : Optional[Any] = torch.cat(
                [timesteps, torch.ones((1, 1) , device=_UpperCamelCase , dtype=torch.long ) * (step + 1)] , dim=1 )
| 29 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
    [
        {
            'framework': 'pytorch',
            'script': 'run_glue.py',
            'model_name_or_path': 'distilbert-base-cased',
            'instance_type': 'ml.g4dn.xlarge',
            'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.6, 'eval_loss': 0.9},
        },
        {
            'framework': 'tensorflow',
            'script': 'run_tf.py',
            'model_name_or_path': 'distilbert-base-cased',
            'instance_type': 'ml.g4dn.xlarge',
            'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.3, 'eval_loss': 0.9},
        },
    ] )
class A__ ( unittest.TestCase ):
    """Single-instance SageMaker training smoke test, parameterized over frameworks.

    Fixes over the previous (machine-mangled) version: method names are
    restored from their call sites (`self.create_estimator()` is called in the
    test body; the test method must start with `test_` to be collected),
    undefined placeholder arguments are replaced (`check=True`,
    `debugger_hook_config=False`, `json.dump(..., outfile)` — the `with ... as
    outfile` binding pins the last one down), and stray non-code residue at the
    end of the block was removed.
    """

    def setUp(self):
        """Copy the training script into the test workspace (PyTorch only) and check fixtures."""
        if self.framework == "pytorch":
            subprocess.run(
                F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        """Build a HuggingFace SageMaker estimator for a single-node training run."""
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=F"""{self.env.base_job_name}-single""",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        """Export the training job's CloudWatch metrics to a CSV in the test workspace."""
        TrainingJobAnalytics(job_name).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""")

    def test_glue(self):
        """Run one training job and assert runtime/accuracy/loss against parameterized bounds."""
        estimator = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds" , 99_9999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(F"""{estimator.latest_training_job.name}.json""" , "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , outfile)
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__a = logging.get_logger(__name__)

# Canonical checkpoint -> hosted config URL for the VAN architecture.
# NOTE(review): both the logger above and this map are bound to the same
# mangled name `__a`, so the logger is immediately overwritten.
__a = {
    'Visual-Attention-Network/van-base': (
        'https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'
    ),
}
class lowercase__( UpperCAmelCase ):
    """Configuration for the Visual Attention Network (VAN) model.

    Fixes over the previous (machine-mangled) version: the `__init__` signature
    repeated one parameter name (a SyntaxError), and every value was bound to a
    throwaway local instead of being stored on the instance, so the config
    carried none of its settings. Parameter names are reconstructed from the
    assignment order and the upstream VAN configuration defaults.
    """

    # PretrainedConfig discriminator (previously bound to a mangled attribute name).
    model_type = 'van'

    def __init__(
        self,
        image_size=2_2_4,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[6_4, 1_2_8, 3_2_0, 5_1_2],
        depths=[3, 3, 1_2, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Store every setting on the instance so PretrainedConfig serialization sees it.
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 30 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
__snake_case : Optional[int] = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
__snake_case : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 269 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase_ (snake_case__ , unittest.TestCase ):
    """CLIP tokenizer test suite (slow + fast tokenizer parity).

    NOTE(review): mangled block — the base `snake_case__` is undefined
    (presumably `TokenizerTesterMixin`, imported above), the class attributes
    lost their mixin-facing names (tokenizer_class, rust_tokenizer_class, ...),
    all methods share the mangled name `_A` so only the last is registered,
    and several locals/parameters are read under undefined names (e.g. `A` in
    setUp, `text_of_1_token`/`text` in the offsets test). Restore the original
    identifiers before relying on this suite.
    """
    __UpperCamelCase: Optional[Any] = CLIPTokenizer
    __UpperCamelCase: List[Any] = CLIPTokenizerFast
    __UpperCamelCase: List[str] = True
    __UpperCamelCase: Any = {}
    __UpperCamelCase: Any = False

    # setUp: write a tiny BPE vocab + merges file for the tokenizer under test.
    def _A ( self : Any ):
        super().setUp()

        # fmt: off
        _UpperCAmelCase : Any = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        _UpperCAmelCase : str = dict(zip(A , range(len(A ) ) ) )
        _UpperCAmelCase : Union[str, Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        _UpperCAmelCase : Union[str, Any] = {"unk_token": "<unk>"}

        _UpperCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        _UpperCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(A ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(A ) )

    # Factory for the slow (Python) tokenizer built from the setUp files.
    def _A ( self : Optional[int] , **A : Optional[Any] ):
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **A )

    # Factory for the fast (Rust) tokenizer built from the setUp files.
    def _A ( self : Tuple , **A : int ):
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **A )

    # Input/expected-output pair used by the shared round-trip tests.
    def _A ( self : Tuple , A : Any ):
        _UpperCAmelCase : int = "lower newer"
        _UpperCAmelCase : Union[str, Any] = "lower newer"
        return input_text, output_text

    # Full tokenization against the hand-written vocab and merges.
    def _A ( self : Any ):
        _UpperCAmelCase : List[Any] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        _UpperCAmelCase : Dict = "lower newer"
        _UpperCAmelCase : str = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        _UpperCAmelCase : List[str] = tokenizer.tokenize(A )
        self.assertListEqual(A , A )

        _UpperCAmelCase : List[str] = tokens + [tokenizer.unk_token]
        _UpperCAmelCase : List[Any] = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A )

    # Slow-vs-fast tokenizer parity on tricky unicode inputs (needs ftfy).
    @require_ftfy
    def _A ( self : Any ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                _UpperCAmelCase : Union[str, Any] = self.tokenizer_class.from_pretrained(A , **A )
                _UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(A , **A )

                _UpperCAmelCase : Optional[Any] = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                _UpperCAmelCase : Any = tokenizer_s.tokenize(A )
                _UpperCAmelCase : List[str] = tokenizer_r.tokenize(A )
                self.assertListEqual(A , A )

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                _UpperCAmelCase : List[Any] = "xa\u0303y" + " " + "x\xe3y"
                _UpperCAmelCase : List[str] = tokenizer_s.tokenize(A )
                _UpperCAmelCase : Optional[Any] = tokenizer_r.tokenize(A )
                self.assertListEqual(A , A )

                # Test that the tokenization is identical on unicode of space type
                _UpperCAmelCase : List[str] = [
                    "\u0009", # (horizontal tab, '\t')
                    "\u000B", # (vertical tab)
                    "\u000C", # (form feed)
                    "\u0020", # (space, ' ')
                    "\u200E", # (left-to-right mark):w
                    "\u200F", # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    _UpperCAmelCase : Any = tokenizer_s.tokenize(A )
                    _UpperCAmelCase : int = tokenizer_r.tokenize(A )
                    self.assertListEqual(A , A )

                # Test that the tokenization is identical on unicode of line break type
                _UpperCAmelCase : Dict = [
                    "\u000A", # (line feed, '\n')
                    "\r\n", # (carriage return and line feed, '\r\n')
                    "\u000D", # (carriage return, '\r')
                    "\r", # (carriage return, '\r')
                    "\u000D", # (carriage return, '\r')
                    "\u2028", # (line separator)
                    "\u2029", # (paragraph separator)
                    # "\u0085", # (next line)
                ]

                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).

                for unicode_seq in line_break_unicodes:
                    _UpperCAmelCase : Optional[int] = tokenizer_s.tokenize(A )
                    _UpperCAmelCase : List[Any] = tokenizer_r.tokenize(A )
                    self.assertListEqual(A , A )

    # Offset-mapping behaviour with and without an added prefix space.
    def _A ( self : List[Any] ):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                _UpperCAmelCase : Optional[int] = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
                _UpperCAmelCase : Optional[Any] = F"""{text_of_1_token} {text_of_1_token}"""

                _UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
                    A , use_fast=A , )
                _UpperCAmelCase : int = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(A ) + 1, len(A ) + 1 + len(A )) , )

                _UpperCAmelCase : str = F""" {text}"""

                _UpperCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(
                    A , use_fast=A , )
                _UpperCAmelCase : Optional[Any] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(A )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )) , )

    # Loading a pre-v4.17 serialized tokenizer must raise a clear error.
    def _A ( self : Dict ):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error in raised when the user try to load a previous version of the tokenizer.
        with self.assertRaises(A ) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )

        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format." ) )

    # Re-run the shared python/rust equivalence test under ftfy.
    @require_ftfy
    def _A ( self : Dict ):
        super().test_tokenization_python_rust_equals()

    # CLIP always lower cases letters
    def _A ( self : str ):
        pass
| 31 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__snake_case : Optional[int] = logging.get_logger(__name__)
class A__ ( BackboneConfigMixin , PretrainedConfig ):
    """Configuration for the MaskFormer Swin backbone.

    Fixes over the previous (machine-mangled) version: the base classes were
    undefined placeholder names (this file imports `BackboneConfigMixin` and
    `PretrainedConfig`, which the class clearly uses), the `__init__` signature
    repeated one parameter name (a SyntaxError), and every setting was bound to
    a throwaway local instead of an instance attribute. Parameter names are
    reconstructed from the assignment order and upstream defaults.
    """

    # PretrainedConfig discriminator and attribute aliases (previously bound to
    # mangled class-attribute names; `attribute_map` is what PretrainedConfig reads).
    model_type = 'maskformer-swin'
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ['stem'] + [F"""stage{idx}""" for idx in range(1 , len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names)
from ..utils import DummyObject, requires_backends
# Placeholder classes that raise an informative ImportError (via
# `requires_backends`) whenever torch / transformers / onnx are missing.
#
# Fixes over the previous (machine-mangled) version: the metaclass was the
# undefined name `lowercase__` — the import above provides `DummyObject`,
# the metaclass these placeholders are built on — and the backend list now
# lives in the `_backends` attribute that `DummyObject`/`requires_backends`
# inspect. The classmethod names follow the standard dummy-object pattern
# (`from_config` / `from_pretrained`).
# NOTE(review): the six distinct pipeline class names were collapsed to one by
# the mangling and cannot be recovered from this file; each definition below
# overwrites the previous one.
class SCREAMING_SNAKE_CASE__ ( metaclass=DummyObject ):
    _backends = ['torch', 'transformers', 'onnx']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])


class SCREAMING_SNAKE_CASE__ ( metaclass=DummyObject ):
    _backends = ['torch', 'transformers', 'onnx']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])


class SCREAMING_SNAKE_CASE__ ( metaclass=DummyObject ):
    _backends = ['torch', 'transformers', 'onnx']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])


class SCREAMING_SNAKE_CASE__ ( metaclass=DummyObject ):
    _backends = ['torch', 'transformers', 'onnx']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])


class SCREAMING_SNAKE_CASE__ ( metaclass=DummyObject ):
    _backends = ['torch', 'transformers', 'onnx']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])


class SCREAMING_SNAKE_CASE__ ( metaclass=DummyObject ):
    _backends = ['torch', 'transformers', 'onnx']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])
| 32 |
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    """Tracks the peak resident-set size (RSS) of this process on a background thread.

    Fixes over the previous (machine-mangled) version: the class is renamed to
    `PeakCPUMemory`, the name under which the module-level singleton below
    instantiates it, and the attributes (`self.process`,
    `self.peak_monitoring`, `self.thread`, `self.cpu_memory_peak`) are actually
    stored on the instance — previously every value was bound to a throwaway
    local while the other methods read those attributes.
    """

    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        """Busy-loop on the monitor thread, recording the maximum RSS seen so far."""
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        """Start the daemon monitoring thread."""
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        """Stop monitoring and return the peak RSS (in bytes) observed."""
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak
# Module-level singleton used by the measurement helpers below — they read the
# name `cpu_peak_tracker`; the previous mangled binding was never referenced.
cpu_peak_tracker = PeakCPUMemory()
def start_measure():
    """Snapshot time, CPU RSS and per-GPU allocated memory; return the baseline dict.

    Also starts the CPU peak-tracking thread and resets CUDA peak statistics so
    a later `end_measure` call can report deltas and peaks.

    Fixes over the previous (machine-mangled) version: renamed from `_lowercase`
    (three functions in this module shared that name, shadowing each other) and
    the `measures` dict is actually built — previously every value was dropped
    into a throwaway local and the `return measures` line raised NameError. The
    dict keys ("time", "cpu", str(gpu_index)) are those the companion
    `end_measure` reads from its argument.
    """
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures
def end_measure(start_measures):
    """Return elapsed time (s) and CPU/GPU memory deltas and peaks (MiB) since `start_measures`.

    Fixes over the previous (machine-mangled) version: renamed from `_lowercase`
    (name shared with two sibling functions), the parameter is bound as
    `start_measures` (the name the body reads), and the result dict is actually
    built instead of being dropped into throwaway locals.
    """
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem (bytes -> MiB via 2**20)
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures
def _lowercase ( __snake_case ,__snake_case ) -> Dict:
print(F"""{description}:""" )
print(F"""- Time: {measures['time']:.2f}s""" )
for i in range(torch.cuda.device_count() ):
print(F"""- GPU {i} allocated: {measures[str(__snake_case )]:.2f}MiB""" )
__lowerCAmelCase : Optional[Any] = measures[F"""{i}-peak"""]
print(F"""- GPU {i} peak: {peak:.2f}MiB""" )
print(F"""- CPU RAM allocated: {measures['cpu']:.2f}MiB""" )
print(F"""- CPU RAM peak: {measures['cpu-peak']:.2f}MiB""" ) | 269 | 0 |
"""simple docstring"""
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    """Nearest-neighbour image scaler: each destination pixel is copied from the
    closest source pixel.

    Renamed from the scrambled placeholder so the instantiation in the
    ``__main__`` block below (``NearestNeighbour(im, dst_w, dst_h)``) resolves.
    """

    def __init__(self, img, dst_width: int, dst_height: int):
        """Store the source image and precompute the sampling ratios.

        Raises:
            ValueError: if either destination dimension is not positive (a zero
                dimension would otherwise divide by zero when computing ratios,
                matching the existing error message).
        """
        if dst_width <= 0 or dst_height <= 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        # How many source pixels one destination pixel spans, per axis.
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        # White RGB canvas that process() fills in (np.uinta did not exist).
        self.output = np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255

    def process(self):
        """Fill every destination pixel from its nearest source pixel."""
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        """Source column for destination column *x*."""
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """Source row for destination row *y*."""
        return int(self.ratio_y * y)
if __name__ == "__main__":
    # Demo: resize the sample image to 800x600 and display the result.
    # The original `__A , __A : Dict = 800, 600` was a SyntaxError and left
    # dst_w / im / n undefined.
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
| 33 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
# Module-level logger for this configuration file.
__snake_case : List[Any] = logging.get_logger(__name__)

# NOTE(review): this rebinds the same name as the logger above, clobbering it --
# the map was presumably meant to live under a distinct name
# (e.g. MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP); confirm against the original file.
# Maps canonical checkpoint ids to their hosted config.json URLs.
__snake_case : Any = {
    'speechbrain/m-ctc-t-large': 'https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json',
    # See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class A__ ( PretrainedConfig ):
    """Configuration holding the hyper-parameters of an M-CTC-T model.

    Fixes in this block: the base class was an undefined placeholder (the file
    imports `PretrainedConfig` for exactly this purpose); every constructor
    parameter shared one placeholder name (a SyntaxError); and all values were
    bound to a throwaway local instead of instance attributes.
    """

    # Identifier under which PretrainedConfig machinery registers this config.
    model_type = 'mctct'

    def __init__(
        self,
        vocab_size=8065,
        hidden_size=1536,
        num_hidden_layers=36,
        intermediate_size=6144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs,
    ):
        """Store all hyper-parameters; special token ids are forwarded to the base class.

        Raises:
            ValueError: if `conv_kernel` does not have `num_conv_layers` entries.
        """
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"""but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, """
                f"""`config.num_conv_layers = {self.num_conv_layers}`.""")
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class _a ( __a ):
def __init__( self : List[str] , lowercase : Optional[Any] , lowercase : int=13 , lowercase : Dict=7 , lowercase : Any=True , lowercase : int=True , lowercase : Optional[int]=False , lowercase : List[str]=True , lowercase : str=99 , lowercase : int=32 , lowercase : Tuple=5 , lowercase : List[str]=4 , lowercase : Optional[Any]=37 , lowercase : Optional[int]="gelu" , lowercase : Optional[int]=0.1 , lowercase : str=0.1 , lowercase : Union[str, Any]=512 , lowercase : List[str]=16 , lowercase : Optional[Any]=2 , lowercase : str=0.02 , lowercase : str=3 , lowercase : List[Any]=4 , lowercase : Union[str, Any]=None , ):
'''simple docstring'''
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_input_mask
UpperCAmelCase = use_token_type_ids
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = num_labels
UpperCAmelCase = num_choices
UpperCAmelCase = scope
def A ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase = None
if self.use_input_mask:
UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self : List[str] ):
'''simple docstring'''
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def A ( self : Union[str, Any] , lowercase : int , lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : Any , lowercase : Any , lowercase : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = DistilBertModel(config=lowercase )
model.to(lowercase )
model.eval()
UpperCAmelCase = model(lowercase , lowercase )
UpperCAmelCase = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : Any , lowercase : Dict , lowercase : List[str] , lowercase : List[Any] , lowercase : Tuple , lowercase : Union[str, Any] , lowercase : int ):
'''simple docstring'''
UpperCAmelCase = DistilBertForMaskedLM(config=lowercase )
model.to(lowercase )
model.eval()
UpperCAmelCase = model(lowercase , attention_mask=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self : Tuple , lowercase : int , lowercase : Tuple , lowercase : Optional[Any] , lowercase : str , lowercase : List[Any] , lowercase : str ):
'''simple docstring'''
UpperCAmelCase = DistilBertForQuestionAnswering(config=lowercase )
model.to(lowercase )
model.eval()
UpperCAmelCase = model(
lowercase , attention_mask=lowercase , start_positions=lowercase , end_positions=lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A ( self : Tuple , lowercase : Union[str, Any] , lowercase : Optional[Any] , lowercase : List[str] , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : int ):
'''simple docstring'''
UpperCAmelCase = self.num_labels
UpperCAmelCase = DistilBertForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
UpperCAmelCase = model(lowercase , attention_mask=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : List[str] , lowercase : Optional[Any] , lowercase : Tuple , lowercase : Any , lowercase : str , lowercase : Optional[Any] , lowercase : Dict ):
'''simple docstring'''
UpperCAmelCase = self.num_labels
UpperCAmelCase = DistilBertForTokenClassification(config=lowercase )
model.to(lowercase )
model.eval()
UpperCAmelCase = model(lowercase , attention_mask=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self : Any , lowercase : Tuple , lowercase : List[str] , lowercase : Dict , lowercase : Union[str, Any] , lowercase : Any , lowercase : str ):
'''simple docstring'''
UpperCAmelCase = self.num_choices
UpperCAmelCase = DistilBertForMultipleChoice(config=lowercase )
model.to(lowercase )
model.eval()
UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase = model(
lowercase , attention_mask=lowercase , labels=lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A ( self : Any ):
'''simple docstring'''
UpperCAmelCase = self.prepare_config_and_inputs()
((UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase)) = config_and_inputs
UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test-suite entry point for the DistilBERT model family.

    The original listed undefined placeholder base classes and bound every class
    attribute to the same placeholder name; the mixin bases and attribute names
    are restored from the imports at the top of the file.
    """

    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): the scrambled source only preserved four `True` values here;
    # these attribute names follow the upstream DistilBERT test file -- confirm.
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the published distilbert-base-uncased weights."""

    @slow
    def test_inference_no_head_absolute_embedding(self):
        """Checks output shape and a fixed 3x3 slice of the last hidden state."""
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        # Reference values recorded from a known-good run of this checkpoint.
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4))
| 34 |
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
# Example data for the Banker's algorithm below: a claim vector, an
# allocated-resources table and a maximum-claim table (one row per process).
# NOTE(review): all three are bound to the same placeholder name, so only the
# last assignment survives -- presumably three distinct constants originally;
# nothing in this fragment reads them (the __main__ block only runs doctest).
__snake_case : Optional[Any] = [8, 5, 9, 7]
__snake_case : List[Any] = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
__snake_case : Optional[int] = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class A__ :
    """Banker's algorithm: simulate granting resources and report whether every
    process can run to completion (safe state) or not.

    The original gave all six methods the same placeholder name (so only the
    last was callable, while the bodies call `self.__need()` etc.) and bound the
    constructor arguments to a throwaway local.
    """

    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        # Total units of each resource that exist in the system.
        self.__claim_vector = claim_vector
        # Units currently held, one row per process.
        self.__allocated_resources_table = allocated_resources_table
        # Maximum units each process may ever request, one row per process.
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Column-wise sum of currently allocated resources."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Free units per resource: claim vector minus everything allocated."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation())

    def __need(self) -> list[list[int]]:
        """Remaining need per process: maximum claim minus current allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Map each process's original index to its need row (survives removals)."""
        return {self.__need().index(i): i for i in self.__need()}

    def main(self, **kwargs) -> None:
        """Run the safety simulation, printing each process as it executes.

        Pass any truthy keyword (e.g. ``describe=True``) to print the data
        tables first.
        """
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"""Process {process_number + 1} is executing.""")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number])
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources]))
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        """Print the allocation/maximum-claim tables plus usage summaries."""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"""P{self.__allocated_resources_table.index(item) + 1}"""
                + " ".join(f"""{it:>8}""" for it in item)
                + "\n")
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"""P{self.__maximum_claim_table.index(item) + 1}"""
                + " ".join(f"""{it:>8}""" for it in item)
                + "\n")
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector))
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources()))
        time.sleep(1)
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    # (Stray dataset-table residue that followed testmod() and broke the
    # syntax has been removed.)
    import doctest

    doctest.testmod()
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazily-resolved public API of the Jukebox package, keyed by submodule.
# The final _LazyModule(...) call below references `_import_structure`, which
# the scrambled version never defined (it bound the dict -- and then the torch
# symbol list, clobbering it -- to a placeholder name).
_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Advertise the torch-backed symbols only when torch is importable.
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    # Static type checkers and IDEs see the real imports.
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )
else:
    import sys

    # Install a lazy proxy so submodules are imported only on first access
    # (the original imported sys but never used it, and bound the proxy to a
    # throwaway name instead of replacing this module).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 35 |
"""simple docstring"""
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs) -> None:
    """Print atomically across processes by holding an exclusive flock on this file.

    Renamed from the scrambled placeholder to match every call site below.
    The original also tried to open/flock the varargs tuple itself instead of
    opening ``__file__`` and locking the file handle.
    """
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
# Per-process identification: the scrambled version bound these values to a
# single placeholder, leaving local_rank/device/hostname/gpu undefined below.
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)

hostname = socket.gethostname()
gpu = f"""[{hostname}-{local_rank}]"""

try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f"""{gpu} is OK (global rank: {rank}/{world_size})""")
    dist.barrier()

    if rank == 0:
        printflock(f"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")

except Exception:
    printflock(f"""{gpu} is broken""")
    raise
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """Return True iff *number* is prime, via 6k±1 trial division.

    Renamed from the placeholder `A` so the `is_prime(...)` call in the prime
    generator below resolves.
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator():
    """Yield the primes in increasing order, indefinitely.

    Renamed from the placeholder `A` so the `prime_generator()` call in the
    solution function below resolves.
    """
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1
def solution(limit: int = 2_000_000) -> int:
    """Project Euler #10: return the sum of all primes strictly below *limit*.

    The original lambda referenced undefined names (`x`, `n`); the parameter is
    restored to `limit` and the lambda binds `x` itself.
    """
    return sum(takewhile(lambda x: x < limit, prime_generator()))
if __name__ == "__main__":
    # Self-test: print the Project Euler #10 result using f-string '=' debug formatting.
    print(f'''{solution() = }''')
| 36 |
"""simple docstring"""
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
__snake_case : Optional[int] = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
__snake_case : str = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
__snake_case : str = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n'
def simple_accuracy(preds, labels):
    """Fraction of positions where *preds* equals *labels* (numpy arrays).

    Renamed to match its call sites in acc_and_fa and the metric class; the
    original declared both parameters with the same placeholder name.
    """
    return float((preds == labels).mean())
def acc_and_fa(preds, labels):
    """Return both accuracy and F1 score for integer predictions/references.

    Renamed to match the `acc_and_fa(...)` call in the metric class; the
    duplicate placeholder parameters are restored to preds/labels.
    """
    acc = simple_accuracy(preds, labels)
    fa = float(fa_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": fa,
    }
def precision_at_aa(en_sentvecs, in_sentvecs):
    """Precision@10 for cross-lingual sentence retrieval.

    For each (mean-centered) English sentence vector, checks whether its true
    counterpart is among the 10 nearest Indian-language vectors by cosine
    distance. Renamed to match the `precision_at_aa(...)` call in the metric
    class; the duplicate placeholder parameters are restored.
    """
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
    """IndicGLUE metric: dispatches to accuracy, accuracy+F1 or precision@10
    depending on the configured subset.

    The hook methods are restored to `_info` / `_compute`, the names the
    `datasets.Metric` base class invokes (the scrambled placeholders were never
    called); `_compute`'s duplicate placeholder parameters were a SyntaxError.
    """

    def _info(self):
        """Declare the metric's features; raises KeyError for unknown subsets."""
        if self.config_name not in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "cvit-mkb-clsr",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
            "wiki-ner",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                "[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
                "\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
                "\"wiki-ner\"]")
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    # cvit-mkb-clsr compares float sentence vectors; every other
                    # subset compares integer labels.
                    "predictions": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                    "references": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                }),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if self.config_name != "cvit-mkb-clsr" else None,
        )

    def _compute(self, predictions, references):
        """Route to the scorer appropriate for the configured subset."""
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_aa(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_fa(predictions, references)
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                "[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
                "\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
                "\"wiki-ner\"]")
'''simple docstring'''
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """Equivalent resistance of resistors in parallel: 1 / sum(1/R_i).

    Renamed: both functions in this module shared one placeholder name, so the
    first was shadowed and unreachable. Also fixes the raise, which passed the
    input list to ValueError instead of the message that was just built.

    Raises:
        ValueError: if any resistor value is zero or negative.
    """
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"""Resistor at index {index} has a negative or zero value!"""
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum
def resistor_series(resistors: list[float]) -> float:
    """Equivalent resistance of resistors in series: sum(R_i).

    Renamed to give this function its own identity (it previously shared a
    placeholder name with the parallel variant above). Also fixes the raise,
    which passed the input list to ValueError instead of the built message.

    Raises:
        ValueError: if any resistor value is negative.
    """
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"""Resistor at index {index} has a negative value!"""
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 37 |
"""simple docstring"""
def _lowercase ( __snake_case ,__snake_case ) -> float:
if digit_amount > 0:
return round(number - int(__snake_case ) ,__snake_case )
return number - int(__snake_case )
if __name__ == "__main__":
    # Demonstrate decimal_isolate on positive, negative and zero inputs.
    # (Stray dataset-table residue after the last print, which broke the
    # syntax, has been removed.)
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazily-resolved public API of the OPT package, keyed by submodule.
# The final _LazyModule(...) call below references `_import_structure`, which
# the scrambled version never defined (it bound the dict -- and then each
# backend's symbol list, clobbering it -- to a placeholder name).
_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch-backed symbols are only advertised when torch is importable.
    _import_structure["modeling_opt"] = [
        "OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OPTForCausalLM",
        "OPTModel",
        "OPTPreTrainedModel",
        "OPTForSequenceClassification",
        "OPTForQuestionAnswering",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_opt"] = [
        "FlaxOPTForCausalLM",
        "FlaxOPTModel",
        "FlaxOPTPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers and IDEs see the real imports.
    from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_opt import (
            OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OPTForCausalLM,
            OPTForQuestionAnswering,
            OPTForSequenceClassification,
            OPTModel,
            OPTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel

else:
    import sys

    # Install a lazy proxy so submodules are imported only on first access
    # (the original imported sys but never used it, and bound the proxy to a
    # throwaway name instead of replacing this module).
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 38 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = ['torch', 'torchsde']
def __init__( self: int , *_SCREAMING_SNAKE_CASE: Optional[Any] , **_SCREAMING_SNAKE_CASE: str) -> Dict:
"""simple docstring"""
requires_backends(self , ["torch", "torchsde"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: Optional[Any] , *_SCREAMING_SNAKE_CASE: Optional[int] , **_SCREAMING_SNAKE_CASE: Optional[int]) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch", "torchsde"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: Dict , *_SCREAMING_SNAKE_CASE: List[Any] , **_SCREAMING_SNAKE_CASE: Any) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["torch", "torchsde"]) | 269 | 0 |
import numpy as np
# 5x5 Polybius square used by the Bifid cipher below ("j" is folded into "i").
_a = [
    ['''a''', '''b''', '''c''', '''d''', '''e'''],
    ['''f''', '''g''', '''h''', '''i''', '''k'''],
    ['''l''', '''m''', '''n''', '''o''', '''p'''],
    ['''q''', '''r''', '''s''', '''t''', '''u'''],
    ['''v''', '''w''', '''x''', '''y''', '''z'''],
]
class __lowerCamelCase:
    """Bifid cipher over a 5x5 Polybius square (no "j"; it maps to "i").

    Encoding looks up each letter's (row, col) in the square, writes all
    rows followed by all cols, then re-reads that sequence two digits at a
    time as new coordinates. Decoding inverts the transposition.
    """

    def __init__(self):
        # Keep a local copy of the Polybius square so instances are
        # self-contained.
        square = [
            ["a", "b", "c", "d", "e"],
            ["f", "g", "h", "i", "k"],
            ["l", "m", "n", "o", "p"],
            ["q", "r", "s", "t", "u"],
            ["v", "w", "x", "y", "z"],
        ]
        self.SQUARE = np.array(square)

    def letter_to_numbers(self, letter):
        """Return the 1-based (row, col) coordinates of `letter` in the square."""
        indexa, indexa_col = np.where(letter == self.SQUARE)
        indexes = np.concatenate([indexa + 1, indexa_col + 1])
        return indexes

    def numbers_to_letter(self, indexa, indexa_col):
        """Return the letter at 1-based (row, col) coordinates."""
        return self.SQUARE[indexa - 1, indexa_col - 1]

    def encode(self, message):
        """Encode `message` (spaces removed, lowercased, "j" -> "i")."""
        message = message.lower().replace(" ", "").replace("j", "i")
        # Row of each letter on line 0, column on line 1.
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]
        # Flatten row-major: all rows, then all columns.
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index_row = int(second_step[numbers_index * 2])
            index_col = int(second_step[(numbers_index * 2) + 1])
            encoded_message = encoded_message + self.numbers_to_letter(index_row, index_col)
        return encoded_message

    def decode(self, message):
        """Decode a message produced by `encode`."""
        message = message.lower().replace(" ", "")
        # Interleave each encoded letter's (row, col) into one flat sequence.
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]
        # Reshaping (2, n) recovers the original rows/cols lines.
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index_row = int(second_step[0, numbers_index])
            index_col = int(second_step[1, numbers_index])
            decoded_message = decoded_message + self.numbers_to_letter(index_row, index_col)
        return decoded_message
| 39 |
"""simple docstring"""
def _lowercase ( ) -> int:
return 1
def _lowercase ( __snake_case ) -> int:
return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def _lowercase ( __snake_case ) -> int:
return 0 if x < 0 else five_pence(x - 5 ) + two_pence(__snake_case )
def _lowercase ( __snake_case ) -> int:
return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(__snake_case )
def _lowercase ( __snake_case ) -> int:
return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(__snake_case )
def _lowercase ( __snake_case ) -> int:
return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(__snake_case )
def _lowercase ( __snake_case ) -> int:
return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(__snake_case )
def _lowercase ( __snake_case ) -> int:
return 0 if x < 0 else two_pound(x - 200 ) + one_pound(__snake_case )
def _lowercase ( __snake_case = 200 ) -> int:
return two_pound(__snake_case )
if __name__ == "__main__":
print(solution(int(input().strip()))) | 269 | 0 |
"""simple docstring"""
from __future__ import annotations
from PIL import Image
# Define glider example
# Define glider example
GLIDER = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]

# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    """Compute one step of Conway's Game of Life.

    Args:
        cells: rectangular grid of 0 (dead) / 1 (alive) cells.

    Returns:
        A new grid of the same shape for the next generation; the input
        grid is not modified.
    """
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Count the live neighbours of (i, j), guarding the grid edges.
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die; all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    """Render `frames` successive Game of Life generations as PIL images.

    Args:
        cells: starting grid of 0/1 cells.
        frames: number of generations (and images) to produce.

    Returns:
        One greyscale RGB image per generation (live cells are black).
    """
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image, then advance the simulation for the next frame.
        images.append(img)
        cells = new_generation(cells)
    return images


if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save("out.gif", save_all=True, append_images=images[1:])
| 40 |
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class A__(TestCase):
    """Repo-hygiene checks over every dataset script under ./datasets."""

    def _no_encoding_on_file_open(self, file_path: str):
        r"""Return a regex match for any `open(...)` call that lacks an explicit
        encoding or binary mode, or None if the file is clean."""
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, file_path: str):
        r"""Return the first real `print(` call (ignoring ones inside comments,
        strings and docstrings), or None if there is no such call."""
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""")
'''simple docstring'''
import os
def SCREAMING_SNAKE_CASE_ () -> Optional[Any]:
with open(os.path.dirname(UpperCamelCase ) + """/grid.txt""" ) as f:
lowerCamelCase__ : Tuple = [] # noqa: E741
for _ in range(20 ):
l.append([int(UpperCamelCase ) for x in f.readline().split()] )
lowerCamelCase__ : Dict = 0
# right
for i in range(20 ):
for j in range(17 ):
lowerCamelCase__ : Any = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
lowerCamelCase__ : List[str] = temp
# down
for i in range(17 ):
for j in range(20 ):
lowerCamelCase__ : Dict = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
lowerCamelCase__ : Dict = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
lowerCamelCase__ : List[str] = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
lowerCamelCase__ : Optional[int] = temp
# diagonal 2
for i in range(17 ):
for j in range(3 , 20 ):
lowerCamelCase__ : int = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
lowerCamelCase__ : Tuple = temp
return maximum
if __name__ == "__main__":
print(solution())
| 41 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'openai/whisper-base': 'https://huggingface.co/openai/whisper-base/resolve/main/config.json',
}

# Token ids suppressed during generation so the model never emits
# non-speech markers (monolingual / multilingual vocabularies).
# fmt: off
NON_SPEECH_TOKENS = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
    705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
    1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
    4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
    11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
    17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
    34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
NON_SPEECH_TOKENS_MULTI = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
    893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
    3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
    7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
    14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
    22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
    42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
# fmt: on
class WhisperConfig(PretrainedConfig):
    """Configuration for a Whisper speech-to-text model.

    Stores the architecture hyper-parameters (layer counts, dimensions,
    dropout rates, generation token ids and SpecAugment settings) and
    forwards the generation-related ids to `PretrainedConfig`.
    """

    model_type = 'whisper'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(
        self,
        vocab_size=5_1865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=5_0257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=5_0256,
        bos_token_id=5_0256,
        eos_token_id=5_0256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 5_0256],  # noqa: B006 — mutable default kept for interface compatibility
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class A__(OnnxSeqaSeqConfigWithPast):
    """ONNX export configuration for Whisper (seq2seq, optional past key values)."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Describe the dynamic axes of the exported model's inputs."""
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ])
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 2_2050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        """Build dummy encoder audio features and decoder token ids for export."""
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        # With past key values the decoder only consumes half the sequence.
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework)

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")
        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating the exported model."""
        return 1e-3
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU checks for `LDMTextToImagePipeline` built from tiny random components."""

    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        "negative_prompt",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # NOTE(review): attribute name reconstructed — confirm against PipelineTesterMixin.
    test_cpu_offload = False

    def get_dummy_components(self):
        """Build minimal unet/scheduler/vae/text-encoder components with fixed seeds."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64), in_channels=3, out_channels=3, down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D'), up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D'), latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=10_00, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vqvae': vae,
            'bert': text_encoder,
            'tokenizer': tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs for the pipeline on `device`."""
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs

    def test_inference_text2img(self):
        """Two-step inference on CPU must reproduce the pinned image slice."""
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests(unittest.TestCase):
    """Slow GPU integration tests against the real CompVis/ldm-text2im-large-256 checkpoint."""

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        """Deterministic call kwargs with pre-sampled 32x32 latents."""
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'latents': latents,
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs

    def test_ldm_default_ddim(self):
        """Three-step inference must reproduce the pinned 256x256 image slice."""
        pipe = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256').to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 2_56, 2_56, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1E-3
@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
    """Nightly GPU test comparing a full 50-step run against a reference array."""

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        """Deterministic call kwargs with pre-sampled 32x32 latents."""
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'latents': latents,
            'generator': generator,
            'num_inference_steps': 50,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs

    def test_ldm_default_ddim(self):
        """Full 50-step inference must match the hosted reference image."""
        pipe = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256').to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]

        expected_image = load_numpy(
            'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy')
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1E-3
| 42 |
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))


@get_duration
def read(dataset, length):
    """Sequentially read `length` single examples."""
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset, length, batch_size):
    """Read the whole dataset in slices of `batch_size`."""
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset, length, type):
    """Read `length` single examples with an output format applied."""
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset, length, batch_size, type):
    """Read in batches with an output format applied."""
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    """Time the read functions over a generated dataset (plain and shuffled)
    and dump the timings to RESULTS_FILE_PATH as JSON."""
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES, seq_shapes={"list": (100,)},)
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs)
    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

# Number of spectrogram frames processed per chunk by the pipeline below.
TARGET_FEATURE_LENGTH = 256
class lowerCamelCase_(DiffusionPipeline):
    """Pipeline generating audio spectrograms from note/continuous encodings
    via a film-conditioned T5 decoder, optionally vocoded with MelGAN."""

    # MelGAN vocoding is optional; the pipeline also works without it.
    _optional_components = ["melgan"]
def __init__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> None:
super().__init__()
# From MELGAN
__UpperCamelCase :int = math.log(1E-5) # Matches MelGAN training.
__UpperCamelCase :int = 4.0 # Largest value for most examples
__UpperCamelCase :str = 128
self.register_modules(
notes_encoder=__lowercase , continuous_encoder=__lowercase , decoder=__lowercase , scheduler=__lowercase , melgan=__lowercase , )
def UpperCamelCase__ ( self , __lowercase , __lowercase=(-1.0, 1.0) , __lowercase=False) -> Dict:
__UpperCamelCase , __UpperCamelCase :str = output_range
if clip:
__UpperCamelCase :Union[str, Any] = torch.clip(__lowercase , self.min_value , self.max_value)
# Scale to [0, 1].
__UpperCamelCase :Union[str, Any] = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def UpperCamelCase__ ( self , __lowercase , __lowercase=(-1.0, 1.0) , __lowercase=False) -> Optional[int]:
__UpperCamelCase , __UpperCamelCase :int = input_range
__UpperCamelCase :Optional[int] = torch.clip(__lowercase , __lowercase , __lowercase) if clip else outputs
# Scale to [0, 1].
__UpperCamelCase :List[str] = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase) -> List[Any]:
__UpperCamelCase :List[str] = input_tokens > 0
__UpperCamelCase , __UpperCamelCase :Union[str, Any] = self.notes_encoder(
encoder_input_tokens=__lowercase , encoder_inputs_mask=__lowercase)
__UpperCamelCase , __UpperCamelCase :Union[str, Any] = self.continuous_encoder(
encoder_inputs=__lowercase , encoder_inputs_mask=__lowercase)
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase) -> str:
__UpperCamelCase :Optional[int] = noise_time
if not torch.is_tensor(__lowercase):
__UpperCamelCase :str = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device)
elif torch.is_tensor(__lowercase) and len(timesteps.shape) == 0:
__UpperCamelCase :Dict = timesteps[None].to(input_tokens.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__UpperCamelCase :List[str] = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device)
__UpperCamelCase :Tuple = self.decoder(
encodings_and_masks=__lowercase , decoder_input_tokens=__lowercase , decoder_noise_time=__lowercase)
return logits
    @torch.no_grad()
    def __call__( self , __lowercase , __lowercase = None , __lowercase = 100 , __lowercase = True , __lowercase = "numpy" , __lowercase = None , __lowercase = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
        """Generate audio by denoising one mel segment per encoder input chunk.

        NOTE(review): obfuscation collapsed every parameter to ``__lowercase``
        (an illegal duplicate-argument signature) and dropped the left-hand
        targets of most assignments. From the body the intended parameters
        appear to be ``input_tokens``, ``generator``, ``num_inference_steps``
        (default 100), ``return_dict``, ``output_type`` ("numpy"), ``callback``
        and ``callback_steps`` — confirm against the original pipeline.
        """
        # callback_steps must be a positive integer when provided.
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(__lowercase , __lowercase) or callback_steps <= 0)
        ):
            raise ValueError(
                f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                f""" {type(__lowercase)}.""")
        # Running buffers: the current segment's mel and the whole-song mel.
        __UpperCamelCase :Union[str, Any] = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa)
        __UpperCamelCase :Union[str, Any] = np.zeros([1, 0, self.n_dims] , np.floataa)
        __UpperCamelCase :Union[str, Any] = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=__lowercase , device=self.device)
        for i, encoder_input_tokens in enumerate(__lowercase):
            if i == 0:
                __UpperCamelCase :int = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device , dtype=self.decoder.dtype)
                # The first chunk has no previous context.
                __UpperCamelCase :int = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=__lowercase , device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                __UpperCamelCase :Tuple = ones
            # Normalize the previous prediction into the decoder's [-1, 1] range.
            __UpperCamelCase :Optional[Any] = self.scale_features(
                __lowercase , output_range=[-1.0, 1.0] , clip=__lowercase)
            __UpperCamelCase :int = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device) , continuous_inputs=__lowercase , continuous_mask=__lowercase , )
            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            __UpperCamelCase :int = randn_tensor(
                shape=encoder_continuous_inputs.shape , generator=__lowercase , device=self.device , dtype=self.decoder.dtype , )
            # set step values
            self.scheduler.set_timesteps(__lowercase)
            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                __UpperCamelCase :Optional[int] = self.decode(
                    encodings_and_masks=__lowercase , input_tokens=__lowercase , noise_time=t / self.scheduler.config.num_train_timesteps , )
                # Compute previous output: x_t -> x_t-1
                __UpperCamelCase :int = self.scheduler.step(__lowercase , __lowercase , __lowercase , generator=__lowercase).prev_sample
            # Map the denoised sample back to feature range and append to the song.
            __UpperCamelCase :Tuple = self.scale_to_features(__lowercase , input_range=[-1.0, 1.0])
            __UpperCamelCase :List[Any] = mel[:1]
            __UpperCamelCase :Optional[Any] = mel.cpu().float().numpy()
            __UpperCamelCase :Any = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1)
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(__lowercase , __lowercase)
            logger.info('''Generated segment''' , __lowercase)
        # "numpy" output requires both ONNX and a melgan vocoder component.
        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                '''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''')
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                '''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''')
        if output_type == "numpy":
            # Vocode the mel spectrogram to a waveform with MelGAN.
            __UpperCamelCase :Optional[Any] = self.melgan(input_features=full_pred_mel.astype(np.floataa))
        else:
            __UpperCamelCase :List[str] = full_pred_mel
        if not return_dict:
            return (output,)
        return AudioPipelineOutput(audios=__lowercase)
| 43 |
"""simple docstring"""
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class A__ ( __SCREAMING_SNAKE_CASE ):
    '''Builds SqueezeBert configs and dummy inputs, and shape-checks every model head.

    NOTE(review): obfuscation collapsed all ``__init__`` parameters to the single
    name ``_SCREAMING_SNAKE_CASE`` (illegal duplicate arguments) and gave every
    method the same name (so at runtime only the last def would survive). The
    attribute assignments in ``__init__`` show the intended parameter order
    (batch_size, seq_length, is_training, ...) — confirm against the original
    tester before executing.
    '''
    def __init__( self: Tuple , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: List[str]=13 , _SCREAMING_SNAKE_CASE: Tuple=7 , _SCREAMING_SNAKE_CASE: int=True , _SCREAMING_SNAKE_CASE: Optional[Any]=True , _SCREAMING_SNAKE_CASE: str=False , _SCREAMING_SNAKE_CASE: Optional[Any]=True , _SCREAMING_SNAKE_CASE: int=99 , _SCREAMING_SNAKE_CASE: int=32 , _SCREAMING_SNAKE_CASE: List[str]=5 , _SCREAMING_SNAKE_CASE: Union[str, Any]=4 , _SCREAMING_SNAKE_CASE: int=64 , _SCREAMING_SNAKE_CASE: List[str]="gelu" , _SCREAMING_SNAKE_CASE: str=0.1 , _SCREAMING_SNAKE_CASE: Any=0.1 , _SCREAMING_SNAKE_CASE: Optional[int]=512 , _SCREAMING_SNAKE_CASE: Tuple=16 , _SCREAMING_SNAKE_CASE: Any=2 , _SCREAMING_SNAKE_CASE: List[str]=0.02 , _SCREAMING_SNAKE_CASE: Tuple=3 , _SCREAMING_SNAKE_CASE: Optional[Any]=4 , _SCREAMING_SNAKE_CASE: int=None , _SCREAMING_SNAKE_CASE: int=2 , _SCREAMING_SNAKE_CASE: str=2 , _SCREAMING_SNAKE_CASE: Union[str, Any]=2 , _SCREAMING_SNAKE_CASE: List[Any]=2 , _SCREAMING_SNAKE_CASE: int=4 , _SCREAMING_SNAKE_CASE: List[str]=1 , ) -> Optional[Any]:
        """Store the test hyper-parameters on the tester instance."""
        __lowerCAmelCase : List[str] = parent
        __lowerCAmelCase : Optional[Any] = batch_size
        __lowerCAmelCase : Union[str, Any] = seq_length
        __lowerCAmelCase : Optional[Any] = is_training
        __lowerCAmelCase : Optional[int] = use_input_mask
        __lowerCAmelCase : Dict = use_token_type_ids
        __lowerCAmelCase : Dict = use_labels
        __lowerCAmelCase : Dict = vocab_size
        __lowerCAmelCase : Tuple = hidden_size
        __lowerCAmelCase : List[Any] = num_hidden_layers
        __lowerCAmelCase : Union[str, Any] = num_attention_heads
        __lowerCAmelCase : Tuple = intermediate_size
        __lowerCAmelCase : List[Any] = hidden_act
        __lowerCAmelCase : Optional[Any] = hidden_dropout_prob
        __lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
        __lowerCAmelCase : Optional[int] = max_position_embeddings
        __lowerCAmelCase : Union[str, Any] = type_vocab_size
        __lowerCAmelCase : Optional[int] = type_sequence_label_size
        __lowerCAmelCase : Dict = initializer_range
        __lowerCAmelCase : Tuple = num_labels
        __lowerCAmelCase : Optional[Any] = num_choices
        __lowerCAmelCase : Union[str, Any] = scope
        # SqueezeBert-specific grouped-convolution hyper-parameters.
        __lowerCAmelCase : Optional[Any] = q_groups
        __lowerCAmelCase : Optional[int] = k_groups
        __lowerCAmelCase : Any = v_groups
        __lowerCAmelCase : int = post_attention_groups
        __lowerCAmelCase : List[str] = intermediate_groups
        __lowerCAmelCase : Optional[Any] = output_groups
    def _SCREAMING_SNAKE_CASE ( self: Dict) -> List[str]:
        """Create random input ids, optional attention mask, labels and a config."""
        __lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        __lowerCAmelCase : Union[str, Any] = None
        if self.use_input_mask:
            __lowerCAmelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length])
        __lowerCAmelCase : Optional[int] = None
        __lowerCAmelCase : List[Any] = None
        __lowerCAmelCase : str = None
        if self.use_labels:
            __lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            __lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            __lowerCAmelCase : str = ids_tensor([self.batch_size] , self.num_choices)
        __lowerCAmelCase : Any = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def _SCREAMING_SNAKE_CASE ( self: Tuple) -> int:
        """Return a SqueezeBertConfig built from the tester's hyper-parameters."""
        return SqueezeBertConfig(
            embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
    def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Tuple) -> Tuple:
        """Instantiate SqueezeBertModel and check the last-hidden-state shape."""
        __lowerCAmelCase : List[Any] = SqueezeBertModel(config=_SCREAMING_SNAKE_CASE)
        model.to(_SCREAMING_SNAKE_CASE)
        model.eval()
        __lowerCAmelCase : List[Any] = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
        __lowerCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Tuple) -> Dict:
        """Check the masked-LM head: logits over the full vocabulary."""
        __lowerCAmelCase : int = SqueezeBertForMaskedLM(config=_SCREAMING_SNAKE_CASE)
        model.to(_SCREAMING_SNAKE_CASE)
        model.eval()
        __lowerCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
    def _SCREAMING_SNAKE_CASE ( self: Optional[int] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Union[str, Any]) -> int:
        """Check the question-answering head: per-token start/end logits."""
        __lowerCAmelCase : str = SqueezeBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE)
        model.to(_SCREAMING_SNAKE_CASE)
        model.eval()
        __lowerCAmelCase : Union[str, Any] = model(
            _SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE)
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
    def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: Tuple) -> Optional[Any]:
        """Check the sequence-classification head: one logit row per label."""
        __lowerCAmelCase : List[Any] = self.num_labels
        __lowerCAmelCase : Union[str, Any] = SqueezeBertForSequenceClassification(_SCREAMING_SNAKE_CASE)
        model.to(_SCREAMING_SNAKE_CASE)
        model.eval()
        __lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
    def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[int]) -> Union[str, Any]:
        """Check the token-classification head: per-token label logits."""
        __lowerCAmelCase : Dict = self.num_labels
        __lowerCAmelCase : Optional[int] = SqueezeBertForTokenClassification(config=_SCREAMING_SNAKE_CASE)
        model.to(_SCREAMING_SNAKE_CASE)
        model.eval()
        __lowerCAmelCase : List[str] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
    def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: int) -> Tuple:
        """Check the multiple-choice head: inputs are tiled across num_choices."""
        __lowerCAmelCase : List[str] = self.num_choices
        __lowerCAmelCase : str = SqueezeBertForMultipleChoice(config=_SCREAMING_SNAKE_CASE)
        model.to(_SCREAMING_SNAKE_CASE)
        model.eval()
        # Expand (batch, seq) inputs to (batch, num_choices, seq).
        __lowerCAmelCase : int = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
        __lowerCAmelCase : Union[str, Any] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
        __lowerCAmelCase : str = model(
            _SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
    def _SCREAMING_SNAKE_CASE ( self: str) -> List[Any]:
        """Return (config, inputs_dict) for the common ModelTesterMixin tests."""
        __lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs()
        ((__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase)) : Union[str, Any] = config_and_inputs
        __lowerCAmelCase : int = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    '''Common + pipeline test-suite wiring for the SqueezeBert model family.'''
    # All SqueezeBert model classes exercised by the common ModelTesterMixin tests.
    SCREAMING_SNAKE_CASE = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    # Pipeline-task -> model mapping consumed by PipelineTesterMixin.
    SCREAMING_SNAKE_CASE = (
        {
            'feature-extraction': SqueezeBertModel,
            'fill-mask': SqueezeBertForMaskedLM,
            'question-answering': SqueezeBertForQuestionAnswering,
            'text-classification': SqueezeBertForSequenceClassification,
            'token-classification': SqueezeBertForTokenClassification,
            'zero-shot': SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): obfuscation collapsed all three flag names to the same
    # identifier (only the last assignment survives at runtime); confirm the
    # intended flag names (e.g. test_pruning / test_resize_embeddings /
    # test_head_masking) against the original file.
    SCREAMING_SNAKE_CASE = False
    SCREAMING_SNAKE_CASE = True
    SCREAMING_SNAKE_CASE = False
    def _SCREAMING_SNAKE_CASE ( self: Dict) -> Optional[Any]:
        """Create the model tester and the config tester (setUp)."""
        __lowerCAmelCase : Any = SqueezeBertModelTester(self)
        __lowerCAmelCase : Optional[int] = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , dim=37)
    def _SCREAMING_SNAKE_CASE ( self: Any) -> Tuple:
        """Run the shared configuration sanity checks."""
        self.config_tester.run_common_tests()
    def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> Any:
        """Shape-check the bare SqueezeBertModel."""
        __lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*_SCREAMING_SNAKE_CASE)
    def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> List[Any]:
        """Shape-check the masked-LM head."""
        __lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*_SCREAMING_SNAKE_CASE)
    def _SCREAMING_SNAKE_CASE ( self: Tuple) -> Union[str, Any]:
        """Shape-check the question-answering head."""
        __lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*_SCREAMING_SNAKE_CASE)
    def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> int:
        """Shape-check the sequence-classification head."""
        __lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*_SCREAMING_SNAKE_CASE)
    def _SCREAMING_SNAKE_CASE ( self: Any) -> int:
        """Shape-check the token-classification head."""
        __lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*_SCREAMING_SNAKE_CASE)
    def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> str:
        """Shape-check the multiple-choice head."""
        __lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*_SCREAMING_SNAKE_CASE)
    @slow
    def _SCREAMING_SNAKE_CASE ( self: Any) -> Dict:
        """Smoke-test loading the first published pretrained checkpoint."""
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowerCAmelCase : Optional[Any] = SqueezeBertModel.from_pretrained(_SCREAMING_SNAKE_CASE)
            self.assertIsNotNone(_SCREAMING_SNAKE_CASE)
@require_sentencepiece
@require_tokenizers
@require_torch
class A__ ( unittest.TestCase ):
    '''Slow integration test: run the pretrained MNLI SqueezeBert checkpoint on a fixed input.'''

    @slow
    def _SCREAMING_SNAKE_CASE ( self: int) -> List[Any]:
        """Check the output shape and logits of squeezebert/squeezebert-mnli on a fixed token sequence.

        Fixes: a stray ``| 269 | 0 |`` artifact fused onto the last line (a
        SyntaxError), and the final assertion comparing a variable with itself
        (``torch.allclose(x, x)`` always passes) — the model output is now
        compared against the recorded expected logits.
        """
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
        input_ids = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape , expected_shape)
        # Reference logits recorded from the pretrained checkpoint; a tolerance
        # guards against small numerical drift across environments.
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output , expected_tensor , atol=1e-4))
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Module-level tokenizer constants.
# NOTE(review): obfuscation rebinds the same name ``_a`` for every constant, so
# only the last assignment survives at runtime; the class below reads
# VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, which these literals presumably were —
# confirm against the original file.
_a : Union[str, Any] = logging.get_logger(__name__)
# Canonical vocabulary file names.
_a : List[Any] = {
    'vocab_file': 'vocab.json',
    'merges_file': 'merges.txt',
    'tokenizer_config_file': 'tokenizer_config.json',
}
# Download URLs for each pretrained checkpoint's vocab files.
_a : Optional[int] = {
    'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
    'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
    'tokenizer_config_file': {
        'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
    },
}
# Maximum model input sizes (positional embeddings) per checkpoint.
_a : Tuple = {'facebook/blenderbot-3B': 128}
class __A ( SCREAMING_SNAKE_CASE_ ):
    '''Fast (tokenizers-backed) Blenderbot tokenizer.

    NOTE(review): obfuscation collapsed all ``__init__`` parameters to ``a__``
    (illegal duplicate arguments); from the ``super().__init__`` call the
    intended parameters are (vocab_file, merges_file, tokenizer_file, errors,
    bos/eos/sep/cls/unk/pad/mask tokens, add_prefix_space, trim_offsets) —
    confirm against the original tokenizer.
    '''
    _UpperCamelCase : Any = VOCAB_FILES_NAMES
    _UpperCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
    _UpperCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _UpperCamelCase : Union[str, Any] = ["input_ids", "attention_mask"]
    _UpperCamelCase : Optional[int] = BlenderbotTokenizer
    def __init__( self , a__=None , a__=None , a__=None , a__="replace" , a__="<s>" , a__="</s>" , a__="</s>" , a__="<s>" , a__="<unk>" , a__="<pad>" , a__="<mask>" , a__=False , a__=True , **a__ , ):
        """Build the fast tokenizer and sync pre-tokenizer/post-processor state."""
        super().__init__(
            a__ , a__ , tokenizer_file=a__ , errors=a__ , bos_token=a__ , eos_token=a__ , sep_token=a__ , cls_token=a__ , unk_token=a__ , pad_token=a__ , mask_token=a__ , add_prefix_space=a__ , trim_offsets=a__ , **a__ , )
        # Rebuild the backend pre-tokenizer if its add_prefix_space disagrees
        # with the requested setting.
        _lowerCAmelCase : int = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""" , a__ ) != add_prefix_space:
            _lowerCAmelCase : int = getattr(a__ , pre_tok_state.pop("""type""" ) )
            _lowerCAmelCase : Optional[Any] = add_prefix_space
            _lowerCAmelCase : List[Any] = pre_tok_class(**a__ )
        _lowerCAmelCase : List[str] = add_prefix_space
        # Likewise rebuild the post-processor when add_prefix_space/trim_offsets
        # differ from the serialized state.
        _lowerCAmelCase : Dict = """post_processor"""
        _lowerCAmelCase : List[Any] = getattr(self.backend_tokenizer , a__ , a__ )
        if tokenizer_component_instance:
            _lowerCAmelCase : Any = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                _lowerCAmelCase : List[Any] = tuple(state["""sep"""] )
            if "cls" in state:
                _lowerCAmelCase : Tuple = tuple(state["""cls"""] )
            _lowerCAmelCase : Union[str, Any] = False
            if state.get("""add_prefix_space""" , a__ ) != add_prefix_space:
                _lowerCAmelCase : Any = add_prefix_space
                _lowerCAmelCase : Optional[Any] = True
            if state.get("""trim_offsets""" , a__ ) != trim_offsets:
                _lowerCAmelCase : Any = trim_offsets
                _lowerCAmelCase : List[str] = True
            if changes_to_apply:
                _lowerCAmelCase : Union[str, Any] = getattr(a__ , state.pop("""type""" ) )
                _lowerCAmelCase : Union[str, Any] = component_class(**a__ )
                setattr(self.backend_tokenizer , a__ , a__ )
    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def __A ( self ):
        """Return the mask token string, or None (with an error log) if unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("""Using mask_token, but it is not set yet.""" )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def __A ( self , a__ ):
        """Set the mask token; plain strings become lstrip'ed AddedTokens."""
        _lowerCAmelCase : Optional[Any] = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else value
        _lowerCAmelCase : List[str] = value
    def __A ( self , *a__ , **a__ ):
        """Batch-encode; pretokenized input requires add_prefix_space=True."""
        _lowerCAmelCase : int = kwargs.get("""is_split_into_words""" , a__ )
        assert self.add_prefix_space or not is_split_into_words, (
            F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*a__ , **a__ )
    def __A ( self , *a__ , **a__ ):
        """Encode a single example; pretokenized input requires add_prefix_space=True."""
        _lowerCAmelCase : str = kwargs.get("""is_split_into_words""" , a__ )
        assert self.add_prefix_space or not is_split_into_words, (
            F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*a__ , **a__ )
    def __A ( self , a__ , a__ = None ):
        """Save the backend tokenizer model files; returns the written paths."""
        _lowerCAmelCase : Optional[Any] = self._tokenizer.model.save(a__ , name=a__ )
        return tuple(a__ )
    def __A ( self , a__ , a__ = None ):
        """Return all-zero token type ids sized for one or two sequences.

        NOTE(review): obfuscation collapsed the two sequence parameters to the
        same name, so the None-check and the usage reference the same variable
        — confirm the original's token_ids_0/token_ids_1 split.
        """
        _lowerCAmelCase : Union[str, Any] = [self.sep_token_id]
        _lowerCAmelCase : Optional[int] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def __A ( self , a__ , a__ = None ):
        """Blenderbot format: just append the EOS token to the sequence."""
        return token_ids_a + [self.eos_token_id]
    def __A ( self , a__ ):
        """Flatten a Conversation into input ids, truncating to model_max_length."""
        _lowerCAmelCase : Optional[int] = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(""" """ + text )
            else:
                # Generated responses should contain them already.
                inputs.append(a__ )
        _lowerCAmelCase : Union[str, Any] = """ """.join(a__ )
        _lowerCAmelCase : Tuple = self.encode(a__ )
        if len(a__ ) > self.model_max_length:
            # Keep only the most recent tokens.
            _lowerCAmelCase : List[Any] = input_ids[-self.model_max_length :]
            logger.warning(F"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." )
        return input_ids
| 44 |
"""simple docstring"""
import os
from math import logaa
def _lowercase ( __snake_case = "base_exp.txt" ) -> int:
__lowerCAmelCase : float = 0
__lowerCAmelCase : Any = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(__snake_case ) ,__snake_case ) ) ):
__lowerCAmelCase , __lowerCAmelCase : List[str] = list(map(__snake_case ,line.split("," ) ) )
if x * logaa(__snake_case ) > largest:
__lowerCAmelCase : Tuple = x * logaa(__snake_case )
__lowerCAmelCase : Optional[Any] = i + 1
return result
if __name__ == "__main__":
    # Fixes: the solver in this file is named `_lowercase`; the previous call to
    # an undefined `solution()` raised NameError, and a stray "| 269 | 0 |"
    # artifact fused onto the line was a SyntaxError.
    print(_lowercase())
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
# Module-level documentation/logging constants.
# NOTE(review): every name here was obfuscated to ``lowercase_`` (each
# assignment rebinds the same name); code below references logger,
# _CONFIG_FOR_DOC, _CHECKPOINT_FOR_DOC, _EXPECTED_OUTPUT_SHAPE,
# _IMAGE_CLASS_CHECKPOINT and _IMAGE_CLASS_EXPECTED_OUTPUT — confirm the
# intended names against the original module.
lowercase_ = logging.get_logger(__name__)
# General docstring
lowercase_ = "MobileNetV1Config"
# Base docstring
lowercase_ = "google/mobilenet_v1_1.0_224"
lowercase_ = [1, 1_0_2_4, 7, 7]
# Image classification docstring
lowercase_ = "google/mobilenet_v1_1.0_224"
lowercase_ = "tabby, tabby cat"
# Known pretrained checkpoints for this architecture.
lowercase_ = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def lowercase ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[int]=None ) -> int:
    """Build a dict mapping TensorFlow MobileNetV1 variable-name prefixes to PyTorch tensors.

    NOTE(review): all three parameters are obfuscated to the same name (an
    illegal duplicate-argument signature); from the body the first argument is
    the PyTorch model (read as ``model``). Confirm the full signature against
    the original converter before executing.
    """
    __a = {}
    # If the model wraps the backbone (classification head variant), unwrap it.
    if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
        __a = model.mobilenet_va
    else:
        __a = model
    # Stem convolution + batch-norm parameters.
    __a = '''MobilenetV1/Conv2d_0/'''
    __a = backbone.conv_stem.convolution.weight
    __a = backbone.conv_stem.normalization.bias
    __a = backbone.conv_stem.normalization.weight
    __a = backbone.conv_stem.normalization.running_mean
    __a = backbone.conv_stem.normalization.running_var
    # Each TF block i maps to a depthwise layer (index 2i) and a pointwise
    # layer (index 2i+1) in the PyTorch backbone.
    for i in range(13 ):
        __a = i + 1
        __a = i * 2
        __a = backbone.layer[pt_index]
        __a = f'''MobilenetV1/Conv2d_{tf_index}_depthwise/'''
        __a = pointer.convolution.weight
        __a = pointer.normalization.bias
        __a = pointer.normalization.weight
        __a = pointer.normalization.running_mean
        __a = pointer.normalization.running_var
        __a = backbone.layer[pt_index + 1]
        __a = f'''MobilenetV1/Conv2d_{tf_index}_pointwise/'''
        __a = pointer.convolution.weight
        __a = pointer.normalization.bias
        __a = pointer.normalization.weight
        __a = pointer.normalization.running_mean
        __a = pointer.normalization.running_var
    if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
        # Classifier head, present only on the classification model variant.
        __a = '''MobilenetV1/Logits/Conv2d_1c_1x1/'''
        __a = model.classifier.weight
        __a = model.classifier.bias
    return tf_to_pt_map
def lowercase ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : int ) -> Tuple:
    """Load MobileNetV1 weights from a TensorFlow checkpoint into the PyTorch model.

    NOTE(review): the three parameters are obfuscated duplicates; from the body
    and the final ``return model`` the first is the PyTorch model and one of the
    others is the TF checkpoint path — confirm against the original loader.
    Requires TensorFlow at call time.
    """
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            '''Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '''
            '''https://www.tensorflow.org/install/ for installation instructions.''' )
        raise
    # Load weights from TF model
    __a = tf.train.list_variables(lowerCAmelCase__ )
    __a = {}
    for name, shape in init_vars:
        logger.info(f'''Loading TF weight {name} with shape {shape}''' )
        __a = tf.train.load_variable(lowerCAmelCase__ , lowerCAmelCase__ )
        __a = array
    # Build TF to PyTorch weights loading map
    __a = _build_tf_to_pytorch_map(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
    for name, pointer in tf_to_pt_map.items():
        logger.info(f'''Importing {name}''' )
        if name not in tf_weights:
            logger.info(f'''{name} not in tf pre-trained weights, skipping''' )
            continue
        __a = tf_weights[name]
        if "depthwise_weights" in name:
            logger.info('''Transposing depthwise''' )
            # Reorder kernel axes from the TF layout to the PyTorch layout.
            __a = np.transpose(lowerCAmelCase__ , (2, 3, 0, 1) )
        elif "weights" in name:
            logger.info('''Transposing''' )
            if len(pointer.shape ) == 2: # copying into linear layer
                __a = array.squeeze().transpose()
            else:
                __a = np.transpose(lowerCAmelCase__ , (3, 2, 0, 1) )
        if pointer.shape != array.shape:
            raise ValueError(f'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''' )
        logger.info(f'''Initialize PyTorch weight {name} {array.shape}''' )
        __a = torch.from_numpy(lowerCAmelCase__ )
        # Drop consumed entries plus their optimizer/EMA companions so the final
        # log lists only genuinely unused weights.
        tf_weights.pop(lowerCAmelCase__ , lowerCAmelCase__ )
        tf_weights.pop(name + '''/RMSProp''' , lowerCAmelCase__ )
        tf_weights.pop(name + '''/RMSProp_1''' , lowerCAmelCase__ )
        tf_weights.pop(name + '''/ExponentialMovingAverage''' , lowerCAmelCase__ )
    logger.info(f'''Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}''' )
    return model
def lowercase ( features: torch.Tensor , conv_layer: "nn.Convad" ) -> torch.Tensor:
    """Zero-pad `features` the way TensorFlow "SAME" padding would for `conv_layer`.

    Fix: the obfuscated signature declared both parameters with the same name
    (illegal duplicate arguments) while the body referenced ``features`` and
    ``conv_layer``; those names are restored. The conv-layer annotation is a
    string so it is not evaluated at definition time.

    Args:
        features: input of shape (..., height, width).
        conv_layer: any object exposing `.stride` and `.kernel_size` pairs.
    """
    in_height , in_width = features.shape[-2:]
    stride_height , stride_width = conv_layer.stride
    kernel_height , kernel_width = conv_layer.kernel_size
    # Total padding needed per axis so the strided convolution covers the input
    # exactly (TF "SAME" rule).
    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height , 0 )
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height) , 0 )
    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width , 0 )
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width) , 0 )
    # Split the padding between the two sides; the extra pixel goes to the
    # right/bottom, matching TF.
    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top
    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features , padding , '''constant''' , 0.0 )
class __lowerCAmelCase ( nn.Module ):
    '''Convolution + optional batch-norm + optional activation block for MobileNetV1.

    Fixes over the previous version: ``__init__`` declared nine parameters all
    named ``_a`` (illegal duplicate arguments) and assigned its layers to
    locals, while ``forward`` reads ``self.config`` / ``self.convolution`` /
    ``self.normalization`` / ``self.activation`` — the parameter names and the
    ``self.*`` targets evidenced by ``forward`` are restored.
    '''

    def __init__( self , config , in_channels , out_channels , kernel_size , stride = 1 , groups = 1 , bias = False , use_normalization = True , use_activation = True , ):
        super().__init__()
        self.config = config
        if in_channels % groups != 0:
            raise ValueError(f'''Input channels ({in_channels}) are not divisible by {groups} groups.''' )
        if out_channels % groups != 0:
            raise ValueError(f'''Output channels ({out_channels}) are not divisible by {groups} groups.''' )
        # TF-style "SAME" padding is applied dynamically in forward() when
        # config.tf_padding is set, so the conv itself gets zero padding then.
        padding = 0 if self.config.tf_padding else int((kernel_size - 1) / 2 )
        self.convolution = nn.Convad(
            in_channels=in_channels , out_channels=out_channels , kernel_size=kernel_size , stride=stride , padding=padding , groups=groups , bias=bias , padding_mode='''zeros''' , )
        if use_normalization:
            # NOTE(review): the obfuscated source hid the affine/track values;
            # True/True follows the upstream MobileNetV1 module — confirm.
            self.normalization = nn.BatchNormad(
                num_features=out_channels , eps=config.layer_norm_eps , momentum=0.9997 , affine=True , track_running_stats=True , )
        else:
            self.normalization = None
        if use_activation:
            if isinstance(use_activation , str ):
                self.activation = ACTaFN[use_activation]
            elif isinstance(config.hidden_act , str ):
                self.activation = ACTaFN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def __UpperCAmelCase ( self , features ):
        """Apply (optional TF padding ->) convolution -> normalization -> activation."""
        if self.config.tf_padding:
            features = apply_tf_padding(features , self.convolution )
        features = self.convolution(features )
        if self.normalization is not None:
            features = self.normalization(features )
        if self.activation is not None:
            features = self.activation(features )
        return features
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    '''Abstract base wiring MobileNetV1 config, TF-weight loading and weight init.'''
    # NOTE(review): the class-attribute names are obfuscated; upstream these are
    # config_class / load_tf_weights / base_model_prefix / main_input_name /
    # supports_gradient_checkpointing — confirm before relying on them.
    __UpperCAmelCase : str = MobileNetVaConfig
    __UpperCAmelCase : Optional[int] = load_tf_weights_in_mobilenet_va
    __UpperCAmelCase : Optional[Any] = 'mobilenet_v1'
    __UpperCAmelCase : Tuple = 'pixel_values'
    __UpperCAmelCase : int = False
    def __UpperCAmelCase ( self , _a ):
        """Initialize module weights: normal(0, initializer_range) for linear/conv
        layers with zeroed bias; zero bias and unit weight for batch norm."""
        if isinstance(_a , (nn.Linear, nn.Convad) ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(_a , nn.BatchNormad ):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0 )
lowercase_ = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
lowercase_ = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    'The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.' , __SCREAMING_SNAKE_CASE , )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    '''Bare MobileNetV1 backbone: conv stem, 13 depthwise/pointwise stages, optional pooling.

    NOTE(review): obfuscation collapsed the ``__init__`` parameters to a
    duplicate name and dropped most ``self.*`` assignment targets (e.g. the
    ``nn.ModuleList()`` result is bound to a local although ``self.layer`` is
    appended to later); confirm the intended attribute names against the
    original module before executing.
    '''
    def __init__( self , _a , _a = True ):
        super().__init__(_a )
        # Config, then the stem: a 3x3 stride-2 convolution from num_channels.
        __a = config
        __a = 32
        __a = max(int(depth * config.depth_multiplier ) , config.min_depth )
        __a = MobileNetVaConvLayer(
            _a , in_channels=config.num_channels , out_channels=_a , kernel_size=3 , stride=2 , )
        # Per-stage strides for the 13 depthwise-separable blocks.
        __a = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
        __a = nn.ModuleList()
        for i in range(13 ):
            __a = out_channels
            if strides[i] == 2 or i == 0:
                # Channel depth doubles at each downsampling stage (and first).
                depth *= 2
                __a = max(int(depth * config.depth_multiplier ) , config.min_depth )
            # Depthwise 3x3 conv followed by a pointwise 1x1 conv.
            self.layer.append(
                MobileNetVaConvLayer(
                    _a , in_channels=_a , out_channels=_a , kernel_size=3 , stride=strides[i] , groups=_a , ) )
            self.layer.append(
                MobileNetVaConvLayer(
                    _a , in_channels=_a , out_channels=_a , kernel_size=1 , ) )
        __a = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
        # Initialize weights and apply final processing
        self.post_init()
    def __UpperCAmelCase ( self , _a ):
        # Head pruning is not supported for this architecture.
        raise NotImplementedError
    @add_start_docstrings_to_model_forward(_a )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=_a , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def __UpperCAmelCase ( self , _a = None , _a = None , _a = None , ):
        """Forward: stem -> 13 stages -> optional pooling; returns a tuple or ModelOutput."""
        __a = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        __a = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError('''You have to specify pixel_values''' )
        __a = self.conv_stem(_a )
        __a = () if output_hidden_states else None
        for i, layer_module in enumerate(self.layer ):
            __a = layer_module(_a )
            if output_hidden_states:
                # Collect the output of every stage when requested.
                __a = all_hidden_states + (hidden_states,)
        __a = hidden_states
        if self.pooler is not None:
            __a = torch.flatten(self.pooler(_a ) , start_dim=1 )
        else:
            __a = None
        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=_a , pooler_output=_a , hidden_states=_a , )
@add_start_docstrings(
'\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , __SCREAMING_SNAKE_CASE , )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a ):
super().__init__(_a )
__a = config.num_labels
__a = MobileNetVaModel(_a )
__a = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
__a = nn.Dropout(config.classifier_dropout_prob , inplace=_a )
__a = nn.Linear(_a , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_a )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __UpperCAmelCase ( self , _a = None , _a = None , _a = None , _a = None , ):
__a = return_dict if return_dict is not None else self.config.use_return_dict
__a = self.mobilenet_va(_a , output_hidden_states=_a , return_dict=_a )
__a = outputs.pooler_output if return_dict else outputs[1]
__a = self.classifier(self.dropout(_a ) )
__a = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__a = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__a = '''single_label_classification'''
else:
__a = '''multi_label_classification'''
if self.config.problem_type == "regression":
__a = MSELoss()
if self.num_labels == 1:
__a = loss_fct(logits.squeeze() , labels.squeeze() )
else:
__a = loss_fct(_a , _a )
elif self.config.problem_type == "single_label_classification":
__a = CrossEntropyLoss()
__a = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__a = BCEWithLogitsLoss()
__a = loss_fct(_a , _a )
if not return_dict:
__a = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=_a , logits=_a , hidden_states=outputs.hidden_states , )
| 45 |
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def _lowercase ( __snake_case ) -> List[str]:
if isinstance(__snake_case ,collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class A__ :
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Optional[Any]) -> str:
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ( self: str) -> int:
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> Tuple:
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ( self: Any , _SCREAMING_SNAKE_CASE: np.ndarray , _SCREAMING_SNAKE_CASE: np.ndarray , _SCREAMING_SNAKE_CASE: float) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : str = np.abs((a - b)).max()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , F"""Difference between torch and flax is {diff} (>= {tol}).""")
def _SCREAMING_SNAKE_CASE ( self: str , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Any=None , **_SCREAMING_SNAKE_CASE: Tuple) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = FlaxVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE)
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim))
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim))
def _SCREAMING_SNAKE_CASE ( self: List[Any] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Optional[Any]=None , **_SCREAMING_SNAKE_CASE: List[str]) -> Dict:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = {"vision_model": vision_model, "text_model": text_model}
__lowerCAmelCase : Optional[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE)
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim))
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim))
def _SCREAMING_SNAKE_CASE ( self: Dict , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: List[Any]=None , **_SCREAMING_SNAKE_CASE: Tuple) -> str:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase : Optional[Any] = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = {"vision_model": vision_model, "text_model": text_model}
__lowerCAmelCase : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = after_output[0]
__lowerCAmelCase : Any = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1e-3)
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Any=None , **_SCREAMING_SNAKE_CASE: List[str]) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase : Any = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = {"vision_model": vision_model, "text_model": text_model}
__lowerCAmelCase : Optional[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = model(
input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = output.vision_model_output.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE) , vision_config.num_hidden_layers)
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowerCAmelCase : List[str] = to_atuple(vision_model.config.image_size)
__lowerCAmelCase : Any = to_atuple(vision_model.config.patch_size)
__lowerCAmelCase : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowerCAmelCase : Tuple = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len))
__lowerCAmelCase : Union[str, Any] = output.text_model_output.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE) , text_config.num_hidden_layers)
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _SCREAMING_SNAKE_CASE ( self: List[str] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: int) -> str:
"""simple docstring"""
pt_model.to(_SCREAMING_SNAKE_CASE)
pt_model.eval()
# prepare inputs
__lowerCAmelCase : Union[str, Any] = inputs_dict
__lowerCAmelCase : Union[str, Any] = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}
with torch.no_grad():
__lowerCAmelCase : Any = pt_model(**_SCREAMING_SNAKE_CASE).to_tuple()
__lowerCAmelCase : List[Any] = fx_model(**_SCREAMING_SNAKE_CASE).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE) , len(_SCREAMING_SNAKE_CASE) , "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4]):
self.assert_almost_equals(_SCREAMING_SNAKE_CASE , pt_output.numpy() , 4e-2)
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = FlaxVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = fx_model_loaded(**_SCREAMING_SNAKE_CASE).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE) , len(_SCREAMING_SNAKE_CASE) , "Output lengths differ between Flax and PyTorch")
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4]):
self.assert_almost_equals(_SCREAMING_SNAKE_CASE , pt_output.numpy() , 4e-2)
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = VisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_flax=_SCREAMING_SNAKE_CASE)
pt_model_loaded.to(_SCREAMING_SNAKE_CASE)
pt_model_loaded.eval()
with torch.no_grad():
__lowerCAmelCase : Optional[Any] = pt_model_loaded(**_SCREAMING_SNAKE_CASE).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE) , len(_SCREAMING_SNAKE_CASE) , "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4]):
self.assert_almost_equals(_SCREAMING_SNAKE_CASE , pt_output_loaded.numpy() , 4e-2)
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Dict) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = VisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = FlaxVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = fx_state
self.check_pt_flax_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: List[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Dict) -> str:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = VisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = FlaxVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = load_flax_weights_in_pytorch_model(_SCREAMING_SNAKE_CASE , fx_model.params)
self.check_pt_flax_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> str:
"""simple docstring"""
__lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Dict) -> int:
"""simple docstring"""
__lowerCAmelCase : List[Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Dict = self.prepare_config_and_inputs()
self.check_save_load(**_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: int) -> Dict:
"""simple docstring"""
__lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_SCREAMING_SNAKE_CASE)
@is_pt_flax_cross_test
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Any:
"""simple docstring"""
__lowerCAmelCase : Dict = self.prepare_config_and_inputs()
__lowerCAmelCase : List[Any] = config_inputs_dict.pop("vision_config")
__lowerCAmelCase : str = config_inputs_dict.pop("text_config")
__lowerCAmelCase : Union[str, Any] = config_inputs_dict
self.check_equivalence_pt_to_flax(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
self.check_equivalence_flax_to_pt(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
@slow
def _SCREAMING_SNAKE_CASE ( self: str) -> Dict:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase : Dict = self.get_pretrained_model_and_inputs()
__lowerCAmelCase : Union[str, Any] = model_a(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = FlaxVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = model_a(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = after_outputs[0]
__lowerCAmelCase : List[Any] = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1e-5)
@require_flax
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-bert" , vision_from_pt=_SCREAMING_SNAKE_CASE , text_from_pt=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Union[str, Any] = 13
__lowerCAmelCase : Optional[int] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
])
__lowerCAmelCase : List[Any] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size)
__lowerCAmelCase : List[Any] = random_attention_mask([batch_size, 4])
__lowerCAmelCase : str = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def _SCREAMING_SNAKE_CASE ( self: int , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Union[str, Any]) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : List[str] = FlaxViTModel(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = FlaxBertModel(_SCREAMING_SNAKE_CASE)
return vision_model, text_model
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> int:
"""simple docstring"""
__lowerCAmelCase : List[Any] = FlaxViTModelTester(self)
__lowerCAmelCase : Optional[Any] = FlaxBertModelTester(self)
__lowerCAmelCase : int = vit_model_tester.prepare_config_and_inputs()
__lowerCAmelCase : List[str] = bert_model_tester.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase : Tuple = vision_config_and_inputs
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self: Dict) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-clip" , "hf-internal-testing/tiny-bert" , vision_from_pt=_SCREAMING_SNAKE_CASE , text_from_pt=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Optional[int] = 13
__lowerCAmelCase : List[str] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
])
__lowerCAmelCase : Any = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size)
__lowerCAmelCase : str = random_attention_mask([batch_size, 4])
__lowerCAmelCase : Optional[int] = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Union[str, Any]) -> int:
"""simple docstring"""
__lowerCAmelCase : int = FlaxCLIPVisionModel(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = FlaxBertModel(_SCREAMING_SNAKE_CASE)
return vision_model, text_model
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = FlaxCLIPVisionModelTester(self)
__lowerCAmelCase : str = FlaxBertModelTester(self)
__lowerCAmelCase : Optional[Any] = clip_model_tester.prepare_config_and_inputs()
__lowerCAmelCase : Dict = bert_model_tester.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase : Any = vision_config_and_inputs
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : List[Any] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class A__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Dict = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian" , logit_scale_init_value=1.0)
__lowerCAmelCase : str = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
__lowerCAmelCase : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
__lowerCAmelCase : Optional[int] = processor(
text=["una foto di un gatto", "una foto di un cane"] , images=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , return_tensors="np")
__lowerCAmelCase : List[str] = model(**_SCREAMING_SNAKE_CASE)
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
__lowerCAmelCase : List[str] = np.array([[1.228_4727, 0.310_4122]])
self.assertTrue(np.allclose(outputs.logits_per_image , _SCREAMING_SNAKE_CASE , atol=1e-3)) | 269 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class lowercase ( unittest.TestCase ):
    """Config holder used by the Donut image-processor tests.

    Stores the processor keyword arguments the test suite needs and exposes
    them as the kwargs dict expected by the image processor.

    Bug fix: the previous version declared every ``__init__`` parameter with
    the same name (a SyntaxError) and bound the values to a throwaway local
    instead of instance attributes, so ``_snake_case`` could never read them.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        # default Donut target size when the caller does not override it
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        # avoid mutable default arguments: per-channel stats default to 0.5
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def _snake_case(self):
        """Return the kwargs dict used to instantiate the image processor.

        Note the key rename: the ``do_align_axis`` attribute is exposed under
        the processor's ``do_align_long_axis`` keyword.
        """
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class lowercase ( _UpperCAmelCase , unittest.TestCase ):
    # Test suite for the Donut image processor: attribute checks, config
    # round-trips, and pixel-value output shapes for PIL / numpy / torch inputs.
    #
    # NOTE(review): this class looks machine-mangled and cannot run as-is:
    #   * `_UpperCAmelCase` is never defined or imported here — presumably it
    #     should be the `ImageProcessingSavingTestMixin` imported at the top of
    #     the file; confirm against the upstream test module.
    #   * `DonutImageProcessingTester` is never defined under that name in this
    #     file (the tester class above was renamed `lowercase`).
    #   * every test method is named `_snake_case`, so each definition shadows
    #     the previous one and only the last survives on the class.
    #   * results are bound to the local `lowerCAmelCase`, but the assertions
    #     read other names (`lowercase`, `image_processor`, `image_inputs`,
    #     `image_processing`, `encoded_images`), which resolve elsewhere or not
    #     at all at runtime.

    # image-processor class under test (None when vision deps are missing)
    _SCREAMING_SNAKE_CASE = DonutImageProcessor if is_vision_available() else None

    def _snake_case ( self ) -> Union[str, Any]:
        # setUp-style hook: build the shared config helper used by the tests.
        lowerCAmelCase = DonutImageProcessingTester(self )

    @property
    def _snake_case ( self ) -> List[Any]:
        # kwargs dict used to instantiate the processor in each test
        return self.image_processor_tester.prepare_image_processor_dict()

    def _snake_case ( self ) -> List[Any]:
        # the processor should expose every documented config attribute
        lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(lowercase , """do_resize""" ) )
        self.assertTrue(hasattr(lowercase , """size""" ) )
        self.assertTrue(hasattr(lowercase , """do_thumbnail""" ) )
        self.assertTrue(hasattr(lowercase , """do_align_long_axis""" ) )
        self.assertTrue(hasattr(lowercase , """do_pad""" ) )
        self.assertTrue(hasattr(lowercase , """do_normalize""" ) )
        self.assertTrue(hasattr(lowercase , """image_mean""" ) )
        self.assertTrue(hasattr(lowercase , """image_std""" ) )

    def _snake_case ( self ) -> List[Any]:
        # `size` may be given as an int or a (width, height) tuple; both are
        # normalized to a {"height": ..., "width": ...} dict
        lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""height""": 18, """width""": 20} )
        lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
        # Previous config had dimensions in (width, height) order
        lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
        self.assertEqual(image_processor.size , {"""height""": 84, """width""": 42} )

    def _snake_case ( self ) -> Tuple:
        # intentionally empty placeholder
        pass

    @is_flaky()
    def _snake_case ( self ) -> Any:
        # Processing PIL images: single image yields a (1, C, H, W) tensor,
        # a batch yields (batch_size, C, H, W).
        # Initialize image_processing
        lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase )
        for image in image_inputs:
            self.assertIsInstance(lowercase , Image.Image )
        # Test not batched input
        lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        # Test batched
        lowerCAmelCase = image_processing(lowercase , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )

    @is_flaky()
    def _snake_case ( self ) -> Optional[int]:
        # Same shape checks as above, but feeding numpy arrays.
        # Initialize image_processing
        lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , numpify=lowercase )
        for image in image_inputs:
            self.assertIsInstance(lowercase , np.ndarray )
        # Test not batched input
        lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        # Test batched
        lowerCAmelCase = image_processing(lowercase , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )

    @is_flaky()
    def _snake_case ( self ) -> Any:
        # Same shape checks as above, but feeding torch tensors.
        # Initialize image_processing
        lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , torchify=lowercase )
        for image in image_inputs:
            self.assertIsInstance(lowercase , torch.Tensor )
        # Test not batched input
        lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        # Test batched
        lowerCAmelCase = image_processing(lowercase , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
| 46 |
"""simple docstring"""
from __future__ import annotations
def _lowercase ( __snake_case ,__snake_case ,__snake_case ,__snake_case ) -> list:
__lowerCAmelCase : Dict = []
__lowerCAmelCase , __lowerCAmelCase : Any = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
__lowerCAmelCase : int = result + left + right
return input_list
def _lowercase ( __snake_case ) -> list:
if len(__snake_case ) <= 1:
return input_list
__lowerCAmelCase : int = list(__snake_case )
# iteration for two-way merging
__lowerCAmelCase : Optional[int] = 2
while p <= len(__snake_case ):
# getting low, high and middle value for merge-sort of single list
for i in range(0 ,len(__snake_case ) ,__snake_case ):
__lowerCAmelCase : Union[str, Any] = i
__lowerCAmelCase : Tuple = i + p - 1
__lowerCAmelCase : Optional[Any] = (low + high + 1) // 2
__lowerCAmelCase : Any = merge(__snake_case ,__snake_case ,__snake_case ,__snake_case )
# final merge of last two parts
if p * 2 >= len(__snake_case ):
__lowerCAmelCase : Optional[Any] = i
__lowerCAmelCase : Union[str, Any] = merge(__snake_case ,0 ,__snake_case ,len(__snake_case ) - 1 )
break
p *= 2
return input_list
if __name__ == "__main__":
    # Read a comma-separated list of integers from stdin and print it sorted.
    # Bug fixes: the previous version bound the input to one throwaway name
    # while reading `user_input` / `unsorted` (NameError), and called
    # `iter_merge_sort`, which does not exist in this file — the sort function
    # defined above is `_lowercase`.
    user_input = input('Enter numbers separated by a comma:\n').strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(',')]
    print(_lowercase(unsorted))
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class A__(metaclass=DummyObject):
    """Placeholder class that raises a helpful error when torch/scipy are missing.

    Bug fixes: the metaclass was written as ``A__`` — the class being defined,
    which is unbound while the class statement executes (NameError) — instead
    of the ``DummyObject`` imported above; and each method declared ``*_a`` and
    ``**_a`` with the same name, which is a SyntaxError.
    """

    # backends this placeholder stands in for
    A__ = ['torch', 'scipy']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'scipy'])

    # NOTE(review): the two classmethods below share the name ``A``, so the
    # second definition shadows the first; upstream dummy objects expose
    # distinct constructors (e.g. from_config / from_pretrained) — confirm the
    # intended names before relying on both.
    @classmethod
    def A(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'scipy'])

    @classmethod
    def A(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'scipy'])
| 47 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class A__(unittest.TestCase):
    """Slow integration test for the small Flax MT5 checkpoint."""

    @slow
    def _SCREAMING_SNAKE_CASE(self):
        """Check the model's cross-entropy score against a known reference value.

        Bug fix: the previous version funneled every intermediate through a
        single reused local and referenced the (undefined) method name as a
        variable, so it raised NameError before asserting anything; distinct
        locals are restored here.
        """
        model = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        # teacher-forced decoder inputs: labels shifted right with the
        # model's pad / decoder-start tokens
        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        # negative total log-likelihood over the label sequence
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def A(path, n_shave_prefix_segments=1):
    """Drop dot-separated segments from a parameter path.

    Bug fix: the previous version declared both parameters with the same name
    (a SyntaxError) while the body read ``path`` / ``n_shave_prefix_segments``.

    Args:
        path: dot-separated parameter name, e.g. ``"model.block.0.weight"``.
        n_shave_prefix_segments: number of leading segments to remove; a
            negative value removes that many trailing segments instead.

    Returns:
        The shortened dot-separated path.
    """
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])
def A(old_list, n_shave_prefix_segments=0):
    """Map resnet-block parameter names from the old layout to the new one.

    Bug fixes: the previous version declared both parameters with the same
    name (a SyntaxError) and bound every intermediate to a throwaway local
    while reading ``new_item`` / ``mapping``, which were never defined; the
    segment-shaving step is inlined so the helper does not depend on the
    differently named shave function elsewhere in the file.

    Args:
        old_list: iterable of old parameter-name strings.
        n_shave_prefix_segments: leading (or, if negative, trailing)
            dot-separated segments to drop from each renamed path.

    Returns:
        List of ``{"old": ..., "new": ...}`` rename records.
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")
        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")
        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")
        # inline segment shaving (see the shave helper above for semantics)
        if n_shave_prefix_segments >= 0:
            new_item = ".".join(new_item.split(".")[n_shave_prefix_segments:])
        else:
            new_item = ".".join(new_item.split(".")[:n_shave_prefix_segments])
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def A(old_list, n_shave_prefix_segments=0):
    """Map attention-block parameter names from the old layout to the new one.

    Bug fixes: the previous version declared both parameters with the same
    name (a SyntaxError) and bound every intermediate to a throwaway local
    while reading ``new_item`` / ``mapping``, which were never defined; the
    segment-shaving step is inlined so the helper does not depend on the
    differently named shave function elsewhere in the file.

    Args:
        old_list: iterable of old parameter-name strings.
        n_shave_prefix_segments: leading (or, if negative, trailing)
            dot-separated segments to drop from each renamed path.

    Returns:
        List of ``{"old": ..., "new": ...}`` rename records.
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")
        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
        # inline segment shaving (see the shave helper above for semantics)
        if n_shave_prefix_segments >= 0:
            new_item = ".".join(new_item.split(".")[n_shave_prefix_segments:])
        else:
            new_item = ".".join(new_item.split(".")[:n_shave_prefix_segments])
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def assign_to_checkpoint(
    paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None
):
    """Copy tensors from *old_checkpoint* into *checkpoint* under renamed keys.

    Args:
        paths: list of ``{"old": ..., "new": ...}`` dicts (from the renew_* helpers).
        checkpoint: destination state dict, mutated in place.
        old_checkpoint: source state dict.
        attention_paths_to_split: optional mapping of fused qkv keys to per-head
            destination keys; each fused tensor is split into query/key/value.
        additional_replacements: optional list of ``{"old", "new"}`` substring
            replacements applied to every destination key.
        config: model config dict; only ``config["num_head_channels"]`` is read,
            and only when splitting attention paths.
    """
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Split fused qkv attention tensors into three separate variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned by the qkv split above.
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here.
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear.
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
def convert_ldm_checkpoint(checkpoint, config):
    """Convert an original LDM UNet state dict to the diffusers UNet layout.

    Args:
        checkpoint: the original LDM state dict.
        config: model config dict; ``config["num_res_blocks"]`` determines how
            input/output block indices map to diffusers down/up block ids.

    Returns:
        A new state dict keyed with diffusers names.
    """
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }

    # Down blocks: input_blocks.0 is conv_in (handled above), so start at 1.
    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        # A block holding only a strided conv is a downsampler, not a resnet.
        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )

    # Middle block: resnet / attention / resnet.
    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )

    # Up blocks.
    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        # Group the layer names inside this output block by their sub-block id.
        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(
                paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config
            )

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            # A single sub-block: plain resnet, copy keys directly.
            resnet_0_paths = renew_resnet_paths(output_blocks[i], n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])
                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : List[str] = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
SCREAMING_SNAKE_CASE__ : int = parser.parse_args()
SCREAMING_SNAKE_CASE__ : List[Any] = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
SCREAMING_SNAKE_CASE__ : int = json.loads(f.read())
SCREAMING_SNAKE_CASE__ : str = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
SCREAMING_SNAKE_CASE__ : List[Any] = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
SCREAMING_SNAKE_CASE__ : int = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))
SCREAMING_SNAKE_CASE__ : Union[str, Any] = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 48 |
"""simple docstring"""
import re
def _lowercase ( __snake_case ) -> str:
if len(re.findall("[ATCG]" ,__snake_case ) ) != len(__snake_case ):
raise ValueError("Invalid Strand" )
return dna.translate(dna.maketrans("ATCG" ,"TAGC" ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 269 | 0 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    """Convert a TensorFlow ALBERT checkpoint to a PyTorch state dict file.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        albert_config_file: path to the ALBERT config JSON describing the architecture.
        pytorch_dump_path: where to write the converted PyTorch state dict.
    """
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
__snake_case :List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--albert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained ALBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__snake_case :List[str] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 49 |
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A__(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for ``FunnelTokenizer`` and ``FunnelTokenizerFast``.

    The mixin base class was obfuscated to an undefined name; ``TokenizerTesterMixin``
    is the only candidate imported by this file. The four class attributes and the
    method names were likewise collapsed to a single repeated identifier and are
    restored to the names the mixin and the bodies actually read
    (``tokenizer_class``, ``self.vocab_file``, ...).
    """

    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        """Write a tiny WordPiece vocab file into the test's temp directory."""
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        """Return a slow tokenizer built from the temp vocab."""
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        """Return a fast tokenizer built from the temp vocab."""
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """Provide a raw/normalized text pair used by the mixin's generic tests."""
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        """Tokenization and id conversion against the fixed toy vocab."""
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        """Funnel uses token type 2 for the leading <cls> token."""
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase : Tuple = logging.get_logger()
def convert_weight_and_push(hidden_sizes, name, config, save_directory, push_to_hub=True):
    """Convert one timm LeViT checkpoint into a transformers model and save it.

    Args:
        hidden_sizes: first hidden size of the variant; selects the timm model.
        name: checkpoint name, e.g. ``"levit-128S"`` (trailing "S" picks levit_128s).
        config: a ``LevitConfig`` for the matching architecture.
        save_directory: ``Path`` the converted model is written under.
        push_to_hub: when truthy, save model and image processor under
            ``save_directory / name``.
    """
    print(F"""Converting {name}...""")

    with torch.no_grad():
        # Pick the timm source model by hidden size (and the "S" suffix for 128s).
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model('levit_128s', pretrained=True)
            else:
                from_model = timm.create_model('levit_128', pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model('levit_192', pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model('levit_256', pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model('levit_384', pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()

        # Copy weights positionally: both state dicts enumerate layers in the same order.
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        # Sanity check: both models must produce identical logits.
        x = torch.randn((2, 3, 224, 224))
        original_outputs = from_model(x)
        converted_outputs = our_model(x).logits

    assert torch.allclose(original_outputs, converted_outputs), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(F"""Pushed {checkpoint_name}""")
def convert_weights_and_push(save_directory, model_name=None, push_to_hub=True):
    """Convert one (or all) LeViT variants and save them under *save_directory*.

    Args:
        save_directory: ``Path`` to write converted checkpoints under.
        model_name: convert only this variant when given; otherwise all variants.
        push_to_hub: forwarded to ``convert_weight_and_push``.

    Returns:
        ``(config, expected_shape)`` for the last converted variant.
    """
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)

    # ImageNet-1k label mapping from the shared hub dataset.
    repo_id = 'huggingface/label-files'
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    idalabel = {int(k): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, idalabel=idalabel, labelaid=labelaid)

    names_to_hidden_sizes = {
        'levit-128S': 128,
        'levit-128': 128,
        'levit-192': 192,
        'levit-256': 256,
        'levit-384': 384,
    }

    names_to_config = {
        'levit-128S': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0, ),
        'levit-128': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, ),
        'levit-192': ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ),
        'levit-256': ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ),
        'levit-384': ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1, ),
    }

    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
_UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""levit-dump-folder/""",
type=Path,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
_UpperCAmelCase : Optional[Any] = parser.parse_args()
_UpperCAmelCase : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 50 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def _lowercase ( __snake_case = "laptop" ) -> DataFrame:
__lowerCAmelCase : str = F"""https://www.amazon.in/laptop/s?k={product}"""
__lowerCAmelCase : Union[str, Any] = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
"Accept-Language": "en-US, en;q=0.5",
}
__lowerCAmelCase : List[str] = BeautifulSoup(requests.get(__snake_case ,headers=__snake_case ).text )
# Initialize a Pandas dataframe with the column titles
__lowerCAmelCase : Dict = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" ,attrs={"class": "s-result-item", "data-component-type": "s-search-result"} ,) ,soup.find_all("div" ,attrs={"class": "a-row a-size-base a-color-base"} ) ,):
try:
__lowerCAmelCase : Any = item.ha.text
__lowerCAmelCase : Union[str, Any] = "https://www.amazon.in/" + item.ha.a["href"]
__lowerCAmelCase : Any = item.find("span" ,attrs={"class": "a-offscreen"} ).text
try:
__lowerCAmelCase : Union[str, Any] = item.find("span" ,attrs={"class": "a-icon-alt"} ).text
except AttributeError:
__lowerCAmelCase : Optional[Any] = "Not available"
try:
__lowerCAmelCase : Union[str, Any] = (
"₹"
+ item.find(
"span" ,attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
)
except AttributeError:
__lowerCAmelCase : Dict = ""
try:
__lowerCAmelCase : str = float(
(
(
float(product_mrp.strip("₹" ).replace("," ,"" ) )
- float(product_price.strip("₹" ).replace("," ,"" ) )
)
/ float(product_mrp.strip("₹" ).replace("," ,"" ) )
)
* 100 )
except ValueError:
__lowerCAmelCase : List[str] = float("nan" )
except AttributeError:
pass
__lowerCAmelCase : int = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
__lowerCAmelCase : Union[str, Any] = " "
__lowerCAmelCase : Union[str, Any] = " "
data_frame.index += 1
return data_frame
if __name__ == "__main__":
__snake_case : Any = 'headphones'
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""") | 269 | 0 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
snake_case_ : Optional[Any] = False
class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    # Placeholder: no fast tests are implemented for this pipeline. Renamed from the
    # obfuscated ``__snake_case`` so it is no longer shadowed by the class below.
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    """Slow integration tests for ``VersatileDiffusionTextToImagePipeline``.

    The three methods were all obfuscated to one repeated name (so only the last
    survived); they are restored to distinct names, with placeholders resolved to
    the values the bodies imply (``torch_device``, ``disable=None``, ``tmpdirname``).
    """

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained('shi-labs/versatile-diffusion')
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy').images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy').images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_versatile_diffusion_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            'shi-labs/versatile-diffusion', torch_dtype=torch.floataa)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy').images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv('TEST_SAGEMAKER', 'False')) is not True, reason='Skipping test because should only be run when releasing minor transformers version', )
@pytest.mark.usefixtures('sm_env')
@parameterized_class(
    [
        {
            'framework': 'pytorch',
            'script': 'run_glue.py',
            'model_name_or_path': 'distilbert-base-cased',
            'instance_type': 'ml.g4dn.xlarge',
            'results': {'train_runtime': 650, 'eval_accuracy': 0.6, 'eval_loss': 0.9},
        },
        {
            'framework': 'tensorflow',
            'script': 'run_tf.py',
            'model_name_or_path': 'distilbert-base-cased',
            'instance_type': 'ml.g4dn.xlarge',
            'results': {'train_runtime': 600, 'eval_accuracy': 0.3, 'eval_loss': 0.9},
        },
    ] )
class A__(unittest.TestCase):
    """Single-node SageMaker training smoke test, parameterized per framework.

    Method names were obfuscated to one repeated identifier (so only the last
    survived); they are restored, and keyword placeholders resolved to the values
    the calls imply (``check=True``, ``instance_count`` forwarding).
    """

    def setUp(self):
        # The PyTorch variant copies the example script into the test workspace.
        if self.framework == "pytorch":
            subprocess.run(
                F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        """Build a HuggingFace SageMaker estimator for this parameter set."""
        return HuggingFace(
            entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=F"""{self.env.base_job_name}-single""", instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path}, metric_definitions=self.env.metric_definitions, py_version="py36", )

    def save_results_as_csv(self, job_name):
        """Export the training job's metrics to a CSV in the test workspace."""
        TrainingJobAnalytics(job_name).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""")

    def test_glue(self):
        estimator = self.create_estimator()
        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(F"""{estimator.latest_training_job.name}.json""", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
__lowerCamelCase : Optional[int] = 3
def primitive_root(p_val: int) -> int:
    """Randomly search for a generator candidate modulo the prime *p_val*.

    Rejects g when g^2 == 1 (mod p) or g^p == 1 (mod p); the function name is
    taken from the call site in ``generate_key``.
    """
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g
def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    """Generate an ElGamal key pair of the given bit size.

    Returns:
        ``(public_key, private_key)`` where ``public_key = (key_size, e_a, e_a2, p)``
        and ``private_key = (key_size, d)``.
    """
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_a = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_aa = cryptomath.find_mod_inverse(pow(e_a, d, p), p)

    public_key = (key_size, e_a, e_aa, p)
    private_key = (key_size, d)
    return public_key, private_key
def make_key_files(name: str, key_size: int) -> None:
    """Generate an ElGamal key pair and write it to ``{name}_pubkey.txt`` /
    ``{name}_privkey.txt``.

    Exits the program instead of overwriting existing key files.
    """
    if os.path.exists(F"""{name}_pubkey.txt""") or os.path.exists(F"""{name}_privkey.txt"""):
        print("\nWARNING:")
        print(
            F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
            "Use a different name or delete these files and re-run this program.")
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(F"""\nWriting public key to file {name}_pubkey.txt...""")
    with open(F"""{name}_pubkey.txt""", "w") as fo:
        fo.write(F"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""")
    print(F"""Writing private key to file {name}_privkey.txt...""")
    with open(F"""{name}_privkey.txt""", "w") as fo:
        fo.write(F"""{private_key[0]},{private_key[1]}""")
def main() -> None:
    """Script entry point: write an ElGamal key pair to disk."""
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")
if __name__ == "__main__":
main()
| 52 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
# Lazy-import structure: module name -> public names it provides. The variable
# must be named ``_import_structure`` because the _LazyModule call below reads it.
_import_structure = {
    'config': [
        'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
        'OnnxConfig',
        'OnnxConfigWithPast',
        'OnnxSeq2SeqConfigWithPast',
        'PatchingSpec',
    ],
    'convert': ['export', 'validate_model_outputs'],
    'features': ['FeaturesManager'],
    'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}

if TYPE_CHECKING:
    # Static type checkers see real imports; the names match _import_structure above.
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''Demonstration of classic fuzzy-set operations with scikit-fuzzy.

BUG FIX: the original bound every intermediate result to the same throwaway
name, so `young`, `middle_aged`, `union`, ... referenced below were never
defined. Each result now gets the name the rest of the script uses.
'''
import numpy as np
import skfuzzy as fuzz

if __name__ == "__main__":
    # Universe of discourse: 75 evenly spaced points on [0, 75].
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Two triangular fuzzy sets ("young" and "middle aged") from distinct
    # membership triangles (the original overwrote the first triangle with
    # the second, making both sets identical).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Constant membership functions used by the bounded operators.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # Plot each set and each operation result in a 4x3 grid.
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title('Young')
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title('Middle aged')
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title('union')
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title('intersection')
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title('complement_a')
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title('difference a/b')
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title('alg_sum')
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title('alg_product')
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title('bdd_sum')
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title('bdd_difference')
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
| 53 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__snake_case : Optional[int] = logging.get_logger(__name__)
class A__ ( BackboneConfigMixin , PretrainedConfig ):
    """Configuration of a MaskFormer-Swin backbone model.

    Stores the hyper-parameters of the Swin encoder used as the MaskFormer
    pixel-level backbone. Defaults describe the Swin-Tiny layout.

    BUG FIXES vs. the original block:
    * the class listed the same (undefined) base twice — restored the two
      distinct bases imported at the top of this file;
    * both class attributes were named identically, so the second silently
      overwrote the first — restored ``model_type`` / ``attribute_map``, the
      names the ``PretrainedConfig`` machinery reads;
    * ``__init__`` declared every parameter under one name (a SyntaxError) and
      stored every value in one local instead of on ``self`` — parameter names
      and attributes restored from the assignment order of the original body.
    """

    model_type = 'maskformer-swin'
    # Map the library's generic attribute names onto Swin's own names.
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [F"""stage{idx}""" for idx in range(1 , len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names)
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
a__ : List[str] = True
except (ImportError, ModuleNotFoundError):
a__ : int = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def UpperCAmelCase__ (lowerCAmelCase_ ):
    '''Return the input text split into one sentence per line via nltk.

    BUG FIX: the original discarded the result of ``re.sub``, so the pegasus
    ``<n>`` newline marker was never actually removed from the text.
    '''
    lowerCAmelCase_ = re.sub("<n>" , "" , lowerCAmelCase_ )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(lowerCAmelCase_ ) )
| 54 |
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class A__ :
    """Track the peak resident-set size (RSS) of the current process.

    A daemon thread polls ``psutil`` as fast as possible between ``start()``
    and ``stop()``; ``stop()`` returns the highest RSS observed (bytes).

    BUG FIX: the original stored every value in a throwaway local instead of
    on ``self``, so no state was ever kept; the instance attributes
    (``process``, ``peak_monitoring``, ``cpu_memory_peak``, ``thread``) and
    the method names referenced elsewhere in this module
    (``target=self.peak_monitor``, ``cpu_peak_tracker.start()/.stop()``)
    are restored.
    """

    def __init__(self) -> None:
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self) -> None:
        """Busy-loop recording the max RSS until ``peak_monitoring`` is cleared."""
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss , self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self) -> None:
        """Begin monitoring on a daemon thread."""
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self) -> int:
        """Stop monitoring and return the peak RSS observed, in bytes."""
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak
# Module-level tracker singleton used by the measure helpers below.
# NOTE(review): the tracker class above is defined as `A__`, not
# `PeakCPUMemory`, and the helpers read it as `cpu_peak_tracker`, not
# `__snake_case` — align the names before running this module.
__snake_case : Tuple = PeakCPUMemory()
def _lowercase( ) -> dict:
    """Snapshot wall-clock time and CPU/GPU memory before a measured section.

    Returns a dict with keys ``"time"`` (seconds), ``"cpu"`` (RSS bytes) and
    one entry per CUDA device index (allocated bytes), and arms the global
    CPU peak tracker.

    BUG FIX: the original bound every measurement to a throwaway local; the
    values are now collected in (and returned through) ``measures``.
    """
    # Time
    measures = {"time": time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    # NOTE(review): the tracker singleton above is bound to `__snake_case` in
    # this file — confirm it is exposed as `cpu_peak_tracker`.
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count() ):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()
    return measures
def _lowercase( start_measures ) -> dict:
    """Compute elapsed time (s) and CPU/GPU memory deltas (MiB) since
    ``start_measures`` (a dict produced by the start-measure helper above).

    BUG FIX: the original bound every delta to a throwaway local and named its
    parameter inconsistently with the body; deltas are now stored in (and
    returned through) ``measures``.
    """
    # Time
    measures = {"time": time.time() - start_measures["time"]}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem (delta and peak, in MiB)
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
    # GPU mem (delta and peak per device, in MiB)
    for i in range(torch.cuda.device_count() ):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[F"""{i}-peak"""] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20
    return measures
def _lowercase ( __snake_case ,__snake_case ) -> Dict:
print(F"""{description}:""" )
print(F"""- Time: {measures['time']:.2f}s""" )
for i in range(torch.cuda.device_count() ):
print(F"""- GPU {i} allocated: {measures[str(__snake_case )]:.2f}MiB""" )
__lowerCAmelCase : Optional[Any] = measures[F"""{i}-peak"""]
print(F"""- GPU {i} peak: {peak:.2f}MiB""" )
print(F"""- CPU RAM allocated: {measures['cpu']:.2f}MiB""" )
print(F"""- CPU RAM peak: {measures['cpu-peak']:.2f}MiB""" ) | 269 | 0 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:
    # Fallback when PIL is missing: a no-op placeholder (presumably standing
    # in for `PIL.Image` so module-level references still resolve — confirm).
    class snake_case :
        """No-op stand-in used when the vision extra is not installed."""
        @staticmethod
        def snake_case ( *UpperCamelCase , **UpperCamelCase ):
            """Accept any arguments and do nothing."""
            pass
@is_pipeline_test
@require_vision
@require_torch
class snake_case ( unittest.TestCase ):
    """Pipeline tests for zero-shot object detection (OwlViT checkpoints).

    NOTE(review): in this file every method is named `snake_case` (so only the
    last definition survives on the class), several signatures repeat the same
    parameter name (a SyntaxError), and each local is rebound to
    `lowerCamelCase_` so later references such as `object_detector` and
    `examples` are undefined. The original identifiers must be restored before
    these tests can run; comments below describe the intended behavior.
    """
    _lowerCamelCase = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
    def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
        """Build a tiny random detection pipeline plus one example input."""
        lowerCamelCase_ = pipeline(
            "zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
        lowerCamelCase_ = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples
    def snake_case ( self , UpperCamelCase , UpperCamelCase ):
        """Run the pipeline at threshold 0 and check the output schema."""
        lowerCamelCase_ = object_detector(examples[0] , threshold=0.0 )
        lowerCamelCase_ = len(UpperCamelCase )
        self.assertGreater(UpperCamelCase , 0 )
        # Every detection must carry a score, a label and a pixel box.
        self.assertEqual(
            UpperCamelCase , [
                {
                    "score": ANY(UpperCamelCase ),
                    "label": ANY(UpperCamelCase ),
                    "box": {"xmin": ANY(UpperCamelCase ), "ymin": ANY(UpperCamelCase ), "xmax": ANY(UpperCamelCase ), "ymax": ANY(UpperCamelCase )},
                }
                for i in range(UpperCamelCase )
            ] , )
    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF" )
    def snake_case ( self ):
        """TF backend has no implementation; intentionally skipped."""
        pass
    @require_torch
    def snake_case ( self ):
        """Exact expected detections for the tiny random checkpoint."""
        lowerCamelCase_ = pipeline(
            "zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
        lowerCamelCase_ = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , )
        self.assertEqual(
            nested_simplify(UpperCamelCase , decimals=4 ) , [
                {"score": 0.7_235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.7_218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.7_184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.6_748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6_656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6_614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6_456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                {"score": 0.6_419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
            ] , )
        # Batched (list) input must yield the same detections, nested one level.
        lowerCamelCase_ = object_detector(
            [
                {
                    "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                    "candidate_labels": ["cat", "remote", "couch"],
                }
            ] , threshold=0.64 , )
        self.assertEqual(
            nested_simplify(UpperCamelCase , decimals=4 ) , [
                [
                    {"score": 0.7_235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.7_218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.7_184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.6_748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6_656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6_614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6_456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                    {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                    {"score": 0.6_419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                ]
            ] , )
    @require_torch
    @slow
    def snake_case ( self ):
        """Default-checkpoint detections on the remote COCO cats image."""
        lowerCamelCase_ = pipeline("zero-shot-object-detection" )
        lowerCamelCase_ = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , )
        self.assertEqual(
            nested_simplify(UpperCamelCase , decimals=4 ) , [
                {"score": 0.2_868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2_537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                {"score": 0.1_474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                {"score": 0.1_208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
            ] , )
        # Two identical inputs must yield two identical result lists.
        lowerCamelCase_ = object_detector(
            [
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
            ] , )
        self.assertEqual(
            nested_simplify(UpperCamelCase , decimals=4 ) , [
                [
                    {"score": 0.2_868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2_537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1_474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1_208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
                [
                    {"score": 0.2_868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2_537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1_474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1_208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
            ] , )
    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF" )
    def snake_case ( self ):
        """TF backend has no implementation; intentionally skipped."""
        pass
    @require_torch
    @slow
    def snake_case ( self ):
        """`threshold` must prune low-confidence detections."""
        lowerCamelCase_ = 0.2
        lowerCamelCase_ = pipeline("zero-shot-object-detection" )
        lowerCamelCase_ = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=UpperCamelCase , )
        self.assertEqual(
            nested_simplify(UpperCamelCase , decimals=4 ) , [
                {"score": 0.2_868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2_537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
            ] , )
    @require_torch
    @slow
    def snake_case ( self ):
        """`top_k` must cap the number of returned detections."""
        lowerCamelCase_ = 2
        lowerCamelCase_ = pipeline("zero-shot-object-detection" )
        lowerCamelCase_ = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=UpperCamelCase , )
        self.assertEqual(
            nested_simplify(UpperCamelCase , decimals=4 ) , [
                {"score": 0.2_868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
            ] , )
| 55 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__snake_case : List[Any] = logging.get_logger(__name__)
__snake_case : Any = {
'speechbrain/m-ctc-t-large': 'https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json',
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'mctct'
def __init__( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: str=8065 , _SCREAMING_SNAKE_CASE: str=1536 , _SCREAMING_SNAKE_CASE: str=36 , _SCREAMING_SNAKE_CASE: Optional[Any]=6144 , _SCREAMING_SNAKE_CASE: Optional[Any]=4 , _SCREAMING_SNAKE_CASE: Union[str, Any]=384 , _SCREAMING_SNAKE_CASE: Optional[Any]=920 , _SCREAMING_SNAKE_CASE: Union[str, Any]=1e-5 , _SCREAMING_SNAKE_CASE: List[Any]=0.3 , _SCREAMING_SNAKE_CASE: Optional[Any]="relu" , _SCREAMING_SNAKE_CASE: Optional[int]=0.02 , _SCREAMING_SNAKE_CASE: Optional[Any]=0.3 , _SCREAMING_SNAKE_CASE: Dict=0.3 , _SCREAMING_SNAKE_CASE: List[Any]=1 , _SCREAMING_SNAKE_CASE: Optional[Any]=0 , _SCREAMING_SNAKE_CASE: List[str]=2 , _SCREAMING_SNAKE_CASE: Union[str, Any]=1 , _SCREAMING_SNAKE_CASE: Tuple=0.3 , _SCREAMING_SNAKE_CASE: Dict=1 , _SCREAMING_SNAKE_CASE: int=(7,) , _SCREAMING_SNAKE_CASE: str=(3,) , _SCREAMING_SNAKE_CASE: Union[str, Any]=80 , _SCREAMING_SNAKE_CASE: Tuple=1 , _SCREAMING_SNAKE_CASE: Dict=None , _SCREAMING_SNAKE_CASE: Tuple="sum" , _SCREAMING_SNAKE_CASE: List[str]=False , **_SCREAMING_SNAKE_CASE: Tuple , ) -> Tuple:
"""simple docstring"""
super().__init__(**_SCREAMING_SNAKE_CASE , pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = vocab_size
__lowerCAmelCase : str = hidden_size
__lowerCAmelCase : str = num_hidden_layers
__lowerCAmelCase : str = intermediate_size
__lowerCAmelCase : List[Any] = num_attention_heads
__lowerCAmelCase : Dict = attention_head_dim
__lowerCAmelCase : Optional[int] = max_position_embeddings
__lowerCAmelCase : str = layer_norm_eps
__lowerCAmelCase : Tuple = layerdrop
__lowerCAmelCase : str = hidden_act
__lowerCAmelCase : List[Any] = initializer_range
__lowerCAmelCase : int = hidden_dropout_prob
__lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
__lowerCAmelCase : str = pad_token_id
__lowerCAmelCase : Optional[int] = bos_token_id
__lowerCAmelCase : Union[str, Any] = eos_token_id
__lowerCAmelCase : Any = conv_glu_dim
__lowerCAmelCase : Optional[int] = conv_dropout
__lowerCAmelCase : Union[str, Any] = num_conv_layers
__lowerCAmelCase : Optional[int] = input_feat_per_channel
__lowerCAmelCase : Union[str, Any] = input_channels
__lowerCAmelCase : Optional[Any] = conv_channels
__lowerCAmelCase : Dict = ctc_loss_reduction
__lowerCAmelCase : int = ctc_zero_infinity
# prevents config testing fail with exporting to json
__lowerCAmelCase : List[str] = list(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = list(_SCREAMING_SNAKE_CASE)
if len(self.conv_kernel) != self.num_conv_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
F"""but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, """
F"""`config.num_conv_layers = {self.num_conv_layers}`.""") | 269 | 0 |
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
a : str = logging.get_logger(__name__)
def __magic_name__ ( model_name ) -> Any:
    '''Build a ``MaskFormerConfig`` (Swin-Tiny backbone) for ``model_name``.

    The number of labels and the id2label mapping are chosen from the dataset
    substring in ``model_name`` (ade20k-full / ade / coco-stuff / coco /
    cityscapes / vistas).

    BUG FIX: the original bound every intermediate to one throwaway local, so
    ``config`` and ``filename`` used below were never defined, and built the
    id2label dict with ``int(model_name)`` instead of ``int(k)``.
    '''
    backbone_config = SwinConfig.from_pretrained(
        '''microsoft/swin-tiny-patch4-window7-224''', out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
    config = MaskFormerConfig(backbone_config=backbone_config )
    repo_id = '''huggingface/label-files'''
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = '''maskformer-ade20k-full-id2label.json'''
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = '''ade20k-id2label.json'''
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = '''maskformer-coco-stuff-id2label.json'''
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = '''coco-panoptic-id2label.json'''
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = '''cityscapes-id2label.json'''
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = '''mapillary-vistas-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset''' ), '''r''' ) )
    config.id2label = {int(k): v for k, v in id2label.items()}
    return config
def __magic_name__ ( __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
snake_case_ = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm1.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm1.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.relative_position_index", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.proj.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.proj.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm2.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm2.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc1.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc1.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc2.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc2.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((F"backbone.layers.{i}.downsample.reduction.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((F"backbone.layers.{i}.downsample.norm.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((F"backbone.layers.{i}.downsample.norm.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append((F"backbone.norm{i}.weight", F"model.pixel_level_module.encoder.hidden_states_norms.{i}.weight") )
rename_keys.append((F"backbone.norm{i}.bias", F"model.pixel_level_module.encoder.hidden_states_norms.{i}.bias") )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3, 0, -1 ), range(0, 3 ) ):
rename_keys.append((F"sem_seg_head.adapter_{source_index}.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight") )
rename_keys.append((F"sem_seg_head.adapter_{source_index}.norm.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight") )
rename_keys.append((F"sem_seg_head.adapter_{source_index}.norm.bias", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias") )
rename_keys.append((F"sem_seg_head.layer_{source_index}.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight") )
rename_keys.append((F"sem_seg_head.layer_{source_index}.norm.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight") )
rename_keys.append((F"sem_seg_head.layer_{source_index}.norm.bias", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias") )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight", F"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias", F"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias") )
# cross-attention out projection
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight", F"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias", F"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias") )
# MLP 1
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight", F"model.transformer_module.decoder.layers.{idx}.fc1.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias", F"model.transformer_module.decoder.layers.{idx}.fc1.bias") )
# MLP 2
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight", F"model.transformer_module.decoder.layers.{idx}.fc2.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias", F"model.transformer_module.decoder.layers.{idx}.fc2.bias") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight", F"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias", F"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight", F"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias", F"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias") )
# layernorm 3 (final layernorm)
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight", F"model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias", F"model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias") )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((F"sem_seg_head.predictor.mask_embed.layers.{i}.weight", F"mask_embedder.{i}.0.weight") )
rename_keys.append((F"sem_seg_head.predictor.mask_embed.layers.{i}.bias", F"mask_embedder.{i}.0.bias") )
# fmt: on
return rename_keys
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> int:
'''simple docstring'''
snake_case_ = dct.pop(__UpperCAmelCase )
snake_case_ = val
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> int:
'''simple docstring'''
snake_case_ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
snake_case_ = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
snake_case_ = state_dict.pop(F"backbone.layers.{i}.blocks.{j}.attn.qkv.weight" )
snake_case_ = state_dict.pop(F"backbone.layers.{i}.blocks.{j}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
snake_case_ = in_proj_weight[:dim, :]
snake_case_ = in_proj_bias[: dim]
snake_case_ = in_proj_weight[
dim : dim * 2, :
]
snake_case_ = in_proj_bias[
dim : dim * 2
]
snake_case_ = in_proj_weight[
-dim :, :
]
snake_case_ = in_proj_bias[-dim :]
# fmt: on
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> Optional[int]:
    """Split each decoder layer's fused in_proj weight/bias (self- and
    cross-attention) into separate q/k/v entries.

    NOTE(review): corrupted by mechanical renaming and non-functional as written:
    - both parameters are named ``__UpperCAmelCase`` (a SyntaxError); the body
      reads ``state_dict`` and ``config`` instead;
    - every result is bound to the throwaway ``snake_case_``, so ``hidden_size``,
      ``in_proj_weight`` and ``in_proj_bias`` are never defined, and the computed
      q/k/v slices are discarded instead of being written back into ``state_dict``
      — TODO restore the original assignment targets from the upstream script.
    """
    snake_case_ = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers ):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        snake_case_ = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight" )
        snake_case_ = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) to the state dict
        snake_case_ = in_proj_weight[: hidden_size, :]
        snake_case_ = in_proj_bias[:config.hidden_size]
        snake_case_ = in_proj_weight[hidden_size : hidden_size * 2, :]
        snake_case_ = in_proj_bias[hidden_size : hidden_size * 2]
        snake_case_ = in_proj_weight[-hidden_size :, :]
        snake_case_ = in_proj_bias[-hidden_size :]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        snake_case_ = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight" )
        snake_case_ = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) to the state dict
        snake_case_ = in_proj_weight[: hidden_size, :]
        snake_case_ = in_proj_bias[:config.hidden_size]
        snake_case_ = in_proj_weight[hidden_size : hidden_size * 2, :]
        snake_case_ = in_proj_bias[hidden_size : hidden_size * 2]
        snake_case_ = in_proj_weight[-hidden_size :, :]
        snake_case_ = in_proj_bias[-hidden_size :]
    # fmt: on
def __magic_name__():
    """Download the standard COCO "two cats" test image and return it as a PIL image.

    Fixes the mangled source, which referenced the undefined names
    ``__UpperCAmelCase`` and ``im``, and carried a wrong ``-> torch.Tensor``
    return annotation (the function returns a PIL image, not a tensor).
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True lets PIL read straight from the HTTP response body
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase = False ) -> int:
    """Convert an original MaskFormer pickle checkpoint to the HF format, verify
    the class-query logits on a test image, and optionally save and/or push the
    converted model and image processor.

    NOTE(review): corrupted by mechanical renaming and non-functional as written:
    - the four parameters share one name (a SyntaxError); the body reads
      ``model_name`` / ``checkpoint_path`` / ``pytorch_dump_folder_path`` /
      ``push_to_hub`` instead;
    - every result is bound to the throwaway ``snake_case_``, so ``config``,
      ``data``, ``state_dict``, ``rename_keys``, ``model``, ``missing_keys`` /
      ``unexpected_keys``, ``image``, ``image_processor``, ``inputs`` and
      ``outputs`` are never defined — TODO restore distinct bindings.
    """
    snake_case_ = get_maskformer_config(__UpperCAmelCase )
    # load original state_dict
    with open(__UpperCAmelCase, '''rb''' ) as f:
        snake_case_ = pickle.load(__UpperCAmelCase )
    snake_case_ = data['''model''']
    # for name, param in state_dict.items():
    # print(name, param.shape)
    # rename keys
    snake_case_ = create_rename_keys(__UpperCAmelCase )
    for src, dest in rename_keys:
        rename_key(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase )
    read_in_swin_q_k_v(__UpperCAmelCase, config.backbone_config )
    read_in_decoder_q_k_v(__UpperCAmelCase, __UpperCAmelCase )
    # update to torch tensors
    for key, value in state_dict.items():
        snake_case_ = torch.from_numpy(__UpperCAmelCase )
    # load 🤗 model
    snake_case_ = MaskFormerForInstanceSegmentation(__UpperCAmelCase )
    model.eval()
    for name, param in model.named_parameters():
        print(__UpperCAmelCase, param.shape )
    snake_case_ ,snake_case_ = model.load_state_dict(__UpperCAmelCase, strict=__UpperCAmelCase )
    # only the final backbone layernorm is expected to be newly initialized
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(__UpperCAmelCase ) == 0, F"Unexpected keys: {unexpected_keys}"
    # verify results
    snake_case_ = prepare_img()
    if "vistas" in model_name:
        snake_case_ = 65
    elif "cityscapes" in model_name:
        snake_case_ = 6_5535
    else:
        snake_case_ = 255
    snake_case_ = True if '''ade''' in model_name else False
    snake_case_ = MaskFormerImageProcessor(ignore_index=__UpperCAmelCase, reduce_labels=__UpperCAmelCase )
    snake_case_ = image_processor(__UpperCAmelCase, return_tensors='''pt''' )
    snake_case_ = model(**__UpperCAmelCase )
    print('''Logits:''', outputs.class_queries_logits[0, :3, :3] )
    if model_name == "maskformer-swin-tiny-ade":
        snake_case_ = torch.tensor(
            [[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], __UpperCAmelCase, atol=1e-4 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(F"Saving model and image processor to {pytorch_dump_folder_path}" )
        Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase )
        model.save_pretrained(__UpperCAmelCase )
        image_processor.save_pretrained(__UpperCAmelCase )
    if push_to_hub:
        print('''Pushing model and image processor to the hub...''' )
        model.push_to_hub(F"nielsr/{model_name}" )
        image_processor.push_to_hub(F"nielsr/{model_name}" )
if __name__ == "__main__":
    # NOTE(review): the mangled source bound both the parser and the parsed args
    # to the same throwaway name "a" while the calls below read "parser"/"args";
    # the intended names are restored.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='maskformer-swin-tiny-ade',
        type=str,
        # was a 1-tuple ('...',): argparse would render the tuple repr in --help
        help='Name of the MaskFormer model you\'d like to convert',
    )
    parser.add_argument(
        '--checkpoint_path',
        default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
        type=str,
        help='Path to the original state dict (.pth file).',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )
    args = parser.parse_args()
    # NOTE(review): "convert_maskformer_checkpoint" is presumably the intended
    # name of the mangled "@torch.no_grad()" converter above — confirm.
    convert_maskformer_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
| 56 |
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
# Demo data for the Banker's algorithm class below.
# NOTE(review): all three constants are bound to the SAME mangled name
# "__snake_case", so only the last assignment survives at runtime; the original
# presumably used three distinct names (claim vector, allocated-resources
# table, maximum-claim table) matching the class constructor's parameters.
__snake_case : Optional[Any] = [8, 5, 9, 7]  # total units of each of the 4 resource types
__snake_case : List[Any] = [  # resources currently allocated, one row per process
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
__snake_case : Optional[int] = [  # maximum claim of each process, one row per process
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class A__:
    """Banker's algorithm for deadlock avoidance.

    Given the total claim vector, the per-process allocation table and the
    per-process maximum-claim table, :meth:`main` repeatedly executes any
    process whose remaining need fits in the currently available resources,
    freeing its allocation, until either all processes ran (safe state) or
    none can proceed (unsafe state).

    NOTE(review): restored from mangled source in which every method was named
    ``_SCREAMING_SNAKE_CASE`` (so only the last definition survived) and loop
    results were discarded into ``__lowerCAmelCase``. Private method names are
    recovered exactly from their ``self.__...()`` call sites; the public entry
    point is assumed to be ``main`` — confirm against upstream.
    """

    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Column-wise sum of the allocation table: units of each resource in use."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Units of each resource still free (claim vector minus usage)."""
        return np.array(self.__claim_vector) - np.array(self.__processes_resource_summation())

    def __need(self) -> list[list[int]]:
        """Remaining need of each process: maximum claim minus current allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Map each process's original index to its need vector, so the original
        process number can be recovered after entries are removed from the list."""
        return {self.__need().index(need): need for need in self.__need()}

    def main(self, **kwargs) -> None:
        """Run the safety check, printing each executed process and the final verdict."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(F"""Process {process_number + 1} is executing.""")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number])
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources]))
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        """Print the allocation and maximum-claim tables plus usage/availability."""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                F"""P{self.__allocated_resources_table.index(item) + 1}"""
                + " ".join(F"""{it:>8}""" for it in item)
                + "\n")
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                F"""P{self.__maximum_claim_table.index(item) + 1}"""
                + " ".join(F"""{it:>8}""" for it in item)
                + "\n")
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector))
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources()))
        time.sleep(1)
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    # Fix: removed corrupted trailing tokens ("| 269 | 0 |") that made this
    # line a SyntaxError in the mangled source.
    import doctest

    doctest.testmod()
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : Tuple = logging.get_logger(__name__)
A : Optional[int] = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class _UpperCamelCase ( lowerCAmelCase__ ):
    """Configuration for a BERT model: stores the architecture hyper-parameters
    (vocabulary/embedding sizes, depth, attention heads, dropouts, ...).

    NOTE(review): restored from mechanically mangled source in which every
    ``__init__`` parameter was named ``__a`` (duplicate argument names are a
    SyntaxError) and every attribute assignment was discarded into the local
    ``__lowerCAmelCase``. Parameter names, order and attribute names follow the
    canonical BertConfig signature, which matches the mangled defaults
    (30522, 768, 12, 12, 3072, "gelu", ...) exactly — confirm against upstream.
    """

    # identifier used by the auto-config machinery (restored from the mangled
    # class attribute that held the string "bert")
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class _UpperCamelCase ( lowerCAmelCase__ ):
    """ONNX export configuration: declares the model inputs and their dynamic axes.

    Fixes the mangled source, which bound the axis mapping to the throwaway
    name ``__lowerCAmelCase`` while the return statement read the undefined
    name ``dynamic_axis`` (a NameError at runtime).
    """

    @property
    def snake_case(self):
        """Return an ordered input-name -> dynamic-axis-label mapping.

        Multiple-choice tasks carry an extra "choice" axis between batch and
        sequence; all other tasks use (batch, sequence).
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 57 |
"""simple docstring"""
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def _lowercase ( *__snake_case ) -> Optional[Any]:
with open(__snake_case ,"r" ) as fh:
fcntl.flock(__snake_case ,fcntl.LOCK_EX )
try:
print(*__snake_case )
finally:
fcntl.flock(__snake_case ,fcntl.LOCK_UN )
# NOTE(review): every module-level result below was bound to the same mangled
# name "__snake_case" while later statements read local_rank / device /
# hostname / gpu / rank / world_size — the intended names are restored from
# those uses.
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

# prefix identifying which host/process each diagnostic line came from
gpu = f"[{hostname}-{local_rank}]"

try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")
    dist.barrier()
    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")

except Exception:
    printflock(f"{gpu} is broken")
    raise
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class a_ ( unittest.TestCase ):
    """Holds the hyper-parameters used to build a DonutImageProcessor under test.

    NOTE(review): restored from mangled source in which every constructor
    parameter was named ``A`` (duplicate argument names are a SyntaxError) and
    every attribute assignment was discarded into ``_SCREAMING_SNAKE_CASE``
    instead of being stored on ``self`` (which the dict method below reads).
    Parameter names are recovered from the keys returned by that method.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        # None defaults replace the original mutable list defaults ([0.5]*3)
        # while keeping the same effective values — backward compatible.
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def snake_case_(self):
        """Return the kwargs dict used to construct a DonutImageProcessor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }

    # the test class below calls this method by its original name
    prepare_image_processor_dict = snake_case_


# the test class below refers to this tester by its original (pre-mangling) name
DonutImageProcessingTester = a_
@require_torch
@require_vision
class a_ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """Test suite for DonutImageProcessor.

    NOTE(review): restored from mangled source in which every method was named
    ``snake_case_`` (so only the last definition survived), the setUp result was
    discarded instead of being stored on ``self.image_processor_tester``, and
    the mixin base class / class attribute names were destroyed. The base class
    is recovered from the file's imports and the attribute from its readers;
    test-method names follow the standard HF image-processing test template —
    confirm against upstream.
    """

    # read by the shared saving mixin and by the tests below
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        # "DonutImageProcessingTester" is the tester class defined above
        # (itself mangled to "a_" in this file).
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        # intentionally empty in the original; the mangled name is
        # unrecoverable from this file — TODO confirm against upstream
        pass

    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 58 |
"""simple docstring"""
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
__snake_case : Optional[int] = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
__snake_case : str = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
__snake_case : str = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n'
def _lowercase ( __snake_case ,__snake_case ) -> Union[str, Any]:
return float((preds == labels).mean() )
def _lowercase(preds, labels):
    """Return both simple accuracy and the F1 score for the predictions.

    Fixes the mangled source's duplicate ``__snake_case`` parameter names
    (a SyntaxError); argument roles are recovered from the keyword call
    (``y_true``/``y_pred``) in the body.
    """
    acc = simple_accuracy(preds, labels)
    # fa_score is sklearn.metrics.f1_score under the file's mangled import name
    fa = float(fa_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": fa,
    }


# the metric class below calls this helper by its original name
acc_and_fa = _lowercase
def _lowercase ( __snake_case ,__snake_case ) -> int:
__lowerCAmelCase : Union[str, Any] = np.array(__snake_case )
__lowerCAmelCase : Tuple = np.array(__snake_case )
__lowerCAmelCase : List[Any] = en_sentvecs.shape[0]
# mean centering
__lowerCAmelCase : Union[str, Any] = en_sentvecs - np.mean(__snake_case ,axis=0 )
__lowerCAmelCase : int = in_sentvecs - np.mean(__snake_case ,axis=0 )
__lowerCAmelCase : Optional[Any] = cdist(__snake_case ,__snake_case ,"cosine" )
__lowerCAmelCase : int = np.array(range(__snake_case ) )
__lowerCAmelCase : int = sim.argsort(axis=1 )[:, :10]
__lowerCAmelCase : Optional[Any] = np.any(preds == actual[:, None] ,axis=1 )
return float(matches.mean() )
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
    """IndicGLUE metric: dispatches per config to accuracy, accuracy+F1, or
    precision@10 (for the cvit-mkb-clsr retrieval task).

    NOTE(review): restored from mangled source in which both methods were named
    ``_SCREAMING_SNAKE_CASE`` with duplicate ``_SCREAMING_SNAKE_CASE`` parameters
    (a SyntaxError). The method names ``_info``/``_compute`` are the hooks the
    ``datasets.Metric`` base class requires; parameter names follow the features
    schema declared in ``_info``. Corrupted trailing tokens on the final line
    were removed.
    """

    def _info(self):
        """Declare the metric's features; reject unknown configuration names."""
        if self.config_name not in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "cvit-mkb-clsr",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
            "wiki-ner",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                "[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
                "\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
                "\"wiki-ner\"]")
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    # cvit-mkb-clsr compares float vectors; all others compare int labels
                    "predictions": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                    "references": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if self.config_name != "cvit-mkb-clsr" else None,
        )

    def _compute(self, predictions, references):
        """Score ``predictions`` against ``references`` for the active config."""
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_aa(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_fa(predictions, references)
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                "[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
                "\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
                "\"wiki-ner\"]")
# Lazy-import scaffold for the Pix2Struct model family.
# NOTE(review): restored from mangled source in which (a) the import-structure
# dict and each submodule list were all bound to the same name
# "__lowerCamelCase", so later assignments clobbered the dict, (b) the
# TYPE_CHECKING branch imported from ".configuration_pixastruct" etc. and
# "PixaStruct..." names, inconsistent with the "pix2struct"/"Pix2Struct..."
# strings declared in the import structure, and (c) the final _LazyModule
# result was never installed into sys.modules.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 59 |
"""simple docstring"""
def _lowercase ( __snake_case ,__snake_case ) -> float:
if digit_amount > 0:
return round(number - int(__snake_case ) ,__snake_case )
return number - int(__snake_case )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3)) | 269 | 0 |
"""simple docstring"""
def euclidean_distance_sqr(point1, point2):
    """Squared Euclidean distance between two 2-D points.

    Fixes the mangled source, which subtracted ``point1`` from itself and so
    always returned 0.
    """
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    """Return ``array`` sorted by the given coordinate (0 = x, 1 = y)."""
    return sorted(array, key=lambda point: point[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    """Brute-force O(n^2) minimum squared distance over the first ``points_counts`` points."""
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    """Minimum squared distance inside the dividing strip; each point is only
    compared against its (at most) 6 predecessors, which is sufficient for
    points sorted within the strip."""
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    """Divide-and-conquer minimum squared distance (O(n log n))."""
    # base case: brute force small inputs
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion on the two halves
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)

    # points within closest_pair_dis of the dividing vertical line
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    """Distance (not squared) between the closest pair among ``points``."""
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5


# NOTE(review): restored from mangled source in which all six functions were
# named "_snake_case" (so only the last survived) while calling each other by
# the original names above, which were therefore undefined.
if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class A__(metaclass=DummyObject):
    """Import-time placeholder for a scheduler that requires `torch` and
    `torchsde`; any attempt to construct or load it raises via
    `requires_backends`.

    NOTE(review): restored from obfuscated text — the original metaclass was
    the undefined name `__SCREAMING_SNAKE_CASE`; `DummyObject` (imported at the
    top of this file) is what every dummy class in this module family uses.
    The backend list is stored on `_backends`, which `DummyObject` is presumed
    to read, and the two clashing classmethod names were restored to the
    canonical `from_config` / `from_pretrained` — confirm against ..utils.
    """

    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        # Stub mirroring the real scheduler's alternate constructor.
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        # Stub mirroring the real scheduler's hub loader.
        requires_backends(cls, ["torch", "torchsde"])
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# SentencePiece marks word-initial pieces with this character.
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

# NOTE(review): the obfuscated text assigned all five constants below to the
# single name `_a`, clobbering each other, while the tokenizer class reads
# VOCAB_FILES_NAMES / PRETRAINED_* / FAIRSEQ_LANGUAGE_CODES / logger.
# Canonical names restored from those in-file references.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1_024,
    "facebook/mbart-large-cc25": 1_024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class A_ (lowercase__ ):
    """SentencePiece-based MBart-style tokenizer (obfuscated identifiers kept
    as-is; code below is byte-identical to the original).

    NOTE(review): this block cannot run as written:
      * the base name `lowercase__` is not defined in this file (presumably
        `PreTrainedTokenizer` — confirm against the canonical module);
      * every class attribute binds the one name `SCREAMING_SNAKE_CASE__` and
        every public method is named `UpperCamelCase__`, so later definitions
        clobber earlier ones;
      * several signatures repeat the parameter name `lowercase_`, which is a
        SyntaxError (duplicate argument), and bodies read names
        (`mask_token`, `sp_model_kwargs`, `src_lang`, ...) that the obfuscated
        assignments never bind.
    """
    # By their right-hand sides, these correspond to vocab_files_names,
    # max_model_input_sizes, pretrained_vocab_files_map, model_input_names,
    # prefix_tokens and suffix_tokens — TODO confirm against the original.
    SCREAMING_SNAKE_CASE__ : Dict = VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE__ : Optional[Any] = ["""input_ids""", """attention_mask"""]
    SCREAMING_SNAKE_CASE__ : List[int] = []
    SCREAMING_SNAKE_CASE__ : List[int] = []

    # Constructor: loads the SentencePiece model, aligns fairseq/spm ids and
    # installs the language-code special tokens.
    def __init__( self , lowercase_ , lowercase_="<s>" , lowercase_="</s>" , lowercase_="</s>" , lowercase_="<s>" , lowercase_="<unk>" , lowercase_="<pad>" , lowercase_="<mask>" , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_ = None , lowercase_=None , **lowercase_ , ):
        """Build the tokenizer from a SentencePiece vocab file."""
        # Mask token behave like a normal word, i.e. include the space before it
        UpperCAmelCase_ : List[Any] = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else mask_token
        UpperCAmelCase_ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , pad_token=lowercase_ , mask_token=lowercase_ , tokenizer_file=lowercase_ , src_lang=lowercase_ , tgt_lang=lowercase_ , additional_special_tokens=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , )
        UpperCAmelCase_ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(lowercase_ ) )
        UpperCAmelCase_ : str = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
        # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        UpperCAmelCase_ : List[str] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        UpperCAmelCase_ : Tuple = 1
        UpperCAmelCase_ : Tuple = len(self.sp_model )
        UpperCAmelCase_ : Tuple = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowercase_ )
        }
        UpperCAmelCase_ : int = {v: k for k, v in self.lang_code_to_id.items()}
        UpperCAmelCase_ : int = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
        UpperCAmelCase_ : Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        UpperCAmelCase_ : str = list(self.lang_code_to_id.keys() )
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens] )
        UpperCAmelCase_ : str = src_lang if src_lang is not None else "en_XX"
        UpperCAmelCase_ : Any = self.lang_code_to_id[self._src_lang]
        UpperCAmelCase_ : Optional[Any] = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )

    # Pickling support: drop the (unpicklable) SentencePiece processor and
    # keep its serialized proto instead.
    def __getstate__( self ):
        """Return picklable state without the live SentencePiece processor."""
        UpperCAmelCase_ : Optional[Any] = self.__dict__.copy()
        UpperCAmelCase_ : List[str] = None
        UpperCAmelCase_ : List[Any] = self.sp_model.serialized_model_proto()
        # NOTE(review): `state` is never bound above (assignments went to
        # `UpperCAmelCase_`) — NameError at runtime.
        return state

    def __setstate__( self , lowercase_ ):
        """Restore state and rebuild the SentencePiece processor."""
        UpperCAmelCase_ : List[Any] = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            UpperCAmelCase_ : List[str] = {}
        UpperCAmelCase_ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )

    # Presumably `vocab_size` in the canonical tokenizer — TODO confirm.
    @property
    def UpperCamelCase__ ( self ):
        """Total vocabulary size: spm pieces + language codes + offset + mask."""
        return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1  # Plus 1 for the mask token

    # Presumably the `src_lang` property — TODO confirm.
    @property
    def UpperCamelCase__ ( self ):
        """Current source-language code."""
        return self._src_lang

    # NOTE(review): `src_lang` is undefined here because the property above
    # was renamed by the obfuscation — NameError at class-creation time.
    @src_lang.setter
    def UpperCamelCase__ ( self , lowercase_ ):
        """Set the source language and refresh its special tokens."""
        UpperCAmelCase_ : str = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )

    # Presumably `get_special_tokens_mask` — TODO confirm.
    def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = False ):
        """Mask of 1s for special tokens, 0s for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_ )
        UpperCAmelCase_ : Optional[Any] = [1] * len(self.prefix_tokens )
        UpperCAmelCase_ : Dict = [1] * len(self.suffix_tokens )
        if token_ids_a is None:
            return prefix_ones + ([0] * len(lowercase_ )) + suffix_ones
        return prefix_ones + ([0] * len(lowercase_ )) + ([0] * len(lowercase_ )) + suffix_ones

    # Presumably `build_inputs_with_special_tokens` — TODO confirm.
    def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ):
        """Wrap token ids with the language-specific prefix/suffix tokens."""
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens

    # Presumably `create_token_type_ids_from_sequences` — TODO confirm.
    def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ):
        """All-zero token-type ids (mBART does not use token types)."""
        UpperCAmelCase_ : Optional[int] = [self.sep_token_id]
        UpperCAmelCase_ : Tuple = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    # Presumably `_build_translation_inputs` — TODO confirm.
    def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ):
        """Tokenize `raw_inputs` for translation and attach the target lang id."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
        UpperCAmelCase_ : List[str] = src_lang
        UpperCAmelCase_ : Any = self(lowercase_ , add_special_tokens=lowercase_ , return_tensors=lowercase_ , **lowercase_ )
        UpperCAmelCase_ : Union[str, Any] = self.convert_tokens_to_ids(lowercase_ )
        UpperCAmelCase_ : Union[str, Any] = tgt_lang_id
        return inputs

    # Presumably `get_vocab` — TODO confirm.
    def UpperCamelCase__ ( self ):
        """Map every known token string to its id, including added tokens."""
        UpperCAmelCase_ : Union[str, Any] = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    # Presumably `_tokenize` — TODO confirm.
    def UpperCamelCase__ ( self , lowercase_ ):
        """Split text into SentencePiece string pieces."""
        return self.sp_model.encode(lowercase_ , out_type=lowercase_ )

    # Presumably `_convert_token_to_id` — TODO confirm.
    def UpperCamelCase__ ( self , lowercase_ ):
        """Token string -> id, honoring the fairseq alignment offset."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        UpperCAmelCase_ : Tuple = self.sp_model.PieceToId(lowercase_ )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    # Presumably `_convert_id_to_token` — TODO confirm.
    def UpperCamelCase__ ( self , lowercase_ ):
        """Id -> token string, honoring the fairseq alignment offset."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )

    # Presumably `convert_tokens_to_string`; the canonical version replaces
    # SPIECE_UNDERLINE with a space — here the obfuscation passes the token
    # list itself to `.replace`, which cannot work. TODO fix upstream.
    def UpperCamelCase__ ( self , lowercase_ ):
        """Join token pieces back into a plain string."""
        UpperCAmelCase_ : str = "".join(lowercase_ ).replace(lowercase_ , " " ).strip()
        return out_string

    # Presumably `save_vocabulary` — TODO confirm.
    def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ):
        """Copy (or re-serialize) the SentencePiece model into `save_directory`."""
        if not os.path.isdir(lowercase_ ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        UpperCAmelCase_ : Union[str, Any] = os.path.join(
            lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , lowercase_ )
        elif not os.path.isfile(self.vocab_file ):
            with open(lowercase_ , "wb" ) as fi:
                UpperCAmelCase_ : List[Any] = self.sp_model.serialized_model_proto()
                fi.write(lowercase_ )
        return (out_vocab_file,)

    # Presumably `prepare_seq2seq_batch` — TODO confirm.
    def UpperCamelCase__ ( self , lowercase_ , lowercase_ = "en_XX" , lowercase_ = None , lowercase_ = "ro_RO" , **lowercase_ , ):
        """Set src/tgt languages and delegate to the base seq2seq batcher."""
        UpperCAmelCase_ : Union[str, Any] = src_lang
        UpperCAmelCase_ : Union[str, Any] = tgt_lang
        return super().prepare_seqaseq_batch(lowercase_ , lowercase_ , **lowercase_ )

    # Presumably `_switch_to_input_mode` — TODO confirm.
    def UpperCamelCase__ ( self ):
        """Switch special tokens to source-language mode."""
        return self.set_src_lang_special_tokens(self.src_lang )

    # Presumably `_switch_to_target_mode` — TODO confirm.
    def UpperCamelCase__ ( self ):
        """Switch special tokens to target-language mode."""
        return self.set_tgt_lang_special_tokens(self.tgt_lang )

    # Presumably `set_src_lang_special_tokens`: no prefix, suffix = [eos, code].
    def UpperCamelCase__ ( self , lowercase_ ):
        """Reset special tokens for the given source language."""
        UpperCAmelCase_ : List[str] = self.lang_code_to_id[src_lang]
        UpperCAmelCase_ : List[str] = []
        UpperCAmelCase_ : Optional[int] = [self.eos_token_id, self.cur_lang_code]

    # Presumably `set_tgt_lang_special_tokens`: no prefix, suffix = [eos, code].
    def UpperCamelCase__ ( self , lowercase_ ):
        """Reset special tokens for the given target language."""
        UpperCAmelCase_ : List[str] = self.lang_code_to_id[lang]
        UpperCAmelCase_ : Tuple = []
        UpperCAmelCase_ : Optional[Any] = [self.eos_token_id, self.cur_lang_code]
| 61 |
"""Project Euler problem 31: count the ways to make an amount of pence from
UK coins (1p, 2p, 5p, 10p, 20p, 50p, £1, £2) via mutual recursion.

NOTE(review): in the obfuscated original all eight functions were named
`_lowercase` (each def clobbering the previous) while the bodies referenced
`one_pence` ... `two_pound` and `solution` — names restored from those
references.
"""


def one_pence() -> int:
    """Exactly one way to finish a sum using only 1p coins."""
    return 1


def two_pence(x: int) -> int:
    """Ways to make `x` pence with coins of value <= 2p."""
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    """Ways to make `x` pence with coins of value <= 5p."""
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    """Ways to make `x` pence with coins of value <= 10p."""
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    """Ways to make `x` pence with coins of value <= 20p."""
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    """Ways to make `x` pence with coins of value <= 50p."""
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    """Ways to make `x` pence with coins of value <= 100p."""
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    """Ways to make `x` pence with coins of value <= 200p (all coins)."""
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(n: int = 200) -> int:
    """Number of ways to make `n` pence from any combination of UK coins."""
    return two_pound(n)


if __name__ == "__main__":
    print(solution(int(input().strip())))
"""Lazy-import module init for GLPN.

NOTE(review): restored the canonical transformers lazy-module pattern — the
obfuscated text reassigned one name (`_A`) four times, destroying the import
structure, and then referenced the never-defined `_import_structure` without
installing the `_LazyModule` into `sys.modules`.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# The config is importable unconditionally.
_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Image-processing classes need the vision backend.
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes need PyTorch.
    _import_structure["modeling_glpn"] = [
        "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GLPNForDepthEstimation",
        "GLPNLayer",
        "GLPNModel",
        "GLPNPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_glpn import GLPNFeatureExtractor
        from .image_processing_glpn import GLPNImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_glpn import (
            GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
            GLPNForDepthEstimation,
            GLPNLayer,
            GLPNModel,
            GLPNPreTrainedModel,
        )

else:
    import sys

    # At runtime the module is replaced by a lazy loader.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 62 |
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class A__(TestCase):
    """Repository-wide code-style checks over the ./datasets scripts.

    NOTE(review): restored from obfuscated text — the base class was the
    undefined name `__SCREAMING_SNAKE_CASE` (the `TestCase` import above is
    the evident original), and all four methods shared one name while the
    test bodies call `self._no_encoding_on_file_open` /
    `self._no_print_statements`; those names were restored accordingly.
    """

    def _no_encoding_on_file_open(self, file_path: str):
        """Return a regex match if the file calls open(...) without an
        explicit encoding or a binary mode, else None."""
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_file_content = input_file.read()
            match = regexp.search(input_file_content)
        return match

    def _no_print_statements(self, file_path: str):
        """Return the first real print(...) call in the file (ignoring prints
        inside comments, strings, and docstrings), else None."""
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_file_content = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_file_content)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        """Every dataset script must pass an explicit utf-8 encoding to open()."""
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(F"""open(...) must use utf-8 encoding in {dataset}""")

    def test_no_print_statements(self):
        """Dataset scripts must log instead of printing."""
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(F"""print statement found in {dataset}. Use datasets.logger/logging instead.""")
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# SentencePiece marks word-initial pieces with this character.
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

# NOTE(review): the obfuscated text assigned all of these constants to the
# single name `lowerCAmelCase_`, clobbering each other, while the tokenizer
# class reads VOCAB_FILES_NAMES / PRETRAINED_* / FAIRSEQ_LANGUAGE_CODES /
# logger. Canonical names restored from those in-file references.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-50-one-to-many-mmt": (
            "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-50-one-to-many-mmt": 10_24,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN', 'af_ZA', 'az_AZ', 'bn_IN', 'fa_IR', 'he_IL', 'hr_HR', 'id_ID', 'ka_GE', 'km_KH', 'mk_MK', 'ml_IN', 'mn_MN', 'mr_IN', 'pl_PL', 'ps_AF', 'pt_XX', 'sv_SE', 'sw_KE', 'ta_IN', 'te_IN', 'th_TH', 'tl_XX', 'uk_UA', 'ur_PK', 'xh_ZA', 'gl_ES', 'sl_SI']
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
    """SentencePiece-based MBart-50-style tokenizer (obfuscated identifiers
    kept as-is; code below is byte-identical to the original).

    NOTE(review): this block cannot run as written:
      * the base name `lowerCamelCase_` is not defined in this file
        (presumably `PreTrainedTokenizer` — confirm against the canonical
        module);
      * every class attribute binds the one name `__a` and every public
        method is named `UpperCamelCase__`, so later definitions clobber
        earlier ones;
      * `__init__` repeats the parameter name `__a`, which is a SyntaxError
        (duplicate argument), and method bodies read names (`mask_token`,
        `sp_model_kwargs`, `src_lang`, ...) that the obfuscated assignments
        never bind.
    """
    # By their right-hand sides these correspond to vocab_files_names,
    # max_model_input_sizes, pretrained_vocab_files_map, model_input_names,
    # prefix_tokens and suffix_tokens — TODO confirm.
    __a =VOCAB_FILES_NAMES
    __a =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __a =PRETRAINED_VOCAB_FILES_MAP
    __a =['input_ids', 'attention_mask']
    __a =[]
    __a =[]

    # Constructor: merges the fairseq language codes into the additional
    # special tokens, loads the SentencePiece model and aligns fairseq/spm ids.
    def __init__( self : Optional[int] , __a : Dict , __a : Optional[Any]=None , __a : Optional[int]=None , __a : Tuple="</s>" , __a : List[Any]="</s>" , __a : Any="<s>" , __a : int="<unk>" , __a : Dict="<pad>" , __a : Tuple="<mask>" , __a : Optional[Dict[str, Any]] = None , **__a : List[Any] , ):
        # Mask token behave like a normal word, i.e. include the space before it
        _a = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
        _a = {} if sp_model_kwargs is None else sp_model_kwargs
        _a = kwargs.get("additional_special_tokens" , [] )
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=__a , tgt_lang=__a , eos_token=__a , unk_token=__a , sep_token=__a , cls_token=__a , pad_token=__a , mask_token=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , )
        _a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(__a ) )
        _a = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
        # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        _a = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        _a = 1
        _a = len(self.sp_model )
        _a = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__a )
        }
        _a = {v: k for k, v in self.lang_code_to_id.items()}
        _a = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
        _a = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        _a = src_lang if src_lang is not None else "en_XX"
        _a = self.lang_code_to_id[self._src_lang]
        _a = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )

    # Presumably `vocab_size` — TODO confirm.
    @property
    def UpperCamelCase__ ( self : int ):
        return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1  # Plus 1 for the mask token

    # Presumably the `src_lang` property — TODO confirm.
    @property
    def UpperCamelCase__ ( self : Optional[int] ):
        return self._src_lang

    # NOTE(review): `src_lang` is undefined here because the property above
    # was renamed by the obfuscation — NameError at class-creation time.
    @src_lang.setter
    def UpperCamelCase__ ( self : Optional[Any] , __a : str ):
        _a = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )

    # Pickling: drop the live SentencePiece processor.
    def __getstate__( self : List[str] ):
        _a = self.__dict__.copy()
        _a = None
        return state

    # Unpickling: rebuild the SentencePiece processor from the vocab file.
    def __setstate__( self : Union[str, Any] , __a : Dict ):
        _a = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            _a = {}
        _a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    # Presumably `get_vocab` — TODO confirm.
    def UpperCamelCase__ ( self : List[str] ):
        _a = {self.convert_ids_to_tokens(__a ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    # Presumably `_tokenize` — TODO confirm.
    def UpperCamelCase__ ( self : Tuple , __a : str ):
        return self.sp_model.encode(__a , out_type=__a )

    # Presumably `_convert_token_to_id` — TODO confirm.
    def UpperCamelCase__ ( self : Dict , __a : str ):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        _a = self.sp_model.PieceToId(__a )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    # Presumably `_convert_id_to_token` — TODO confirm.
    def UpperCamelCase__ ( self : Tuple , __a : int ):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )

    # Presumably `convert_tokens_to_string`: decode pieces, keeping special
    # tokens out of the SentencePiece decoder. NOTE(review): the obfuscation
    # collapsed `current_sub_tokens`/`out_string`/`prev_is_special` into one
    # local `_a`, so the loop cannot work as written.
    def UpperCamelCase__ ( self : Dict , __a : Dict ):
        _a = []
        _a = ""
        _a = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(__a ) + token
                _a = True
                _a = []
            else:
                current_sub_tokens.append(__a )
                _a = False
        out_string += self.sp_model.decode(__a )
        return out_string.strip()

    # Presumably `save_vocabulary` — TODO confirm.
    def UpperCamelCase__ ( self : List[str] , __a : str , __a : Optional[str] = None ):
        if not os.path.isdir(__a ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        _a = os.path.join(
            __a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , __a )
        elif not os.path.isfile(self.vocab_file ):
            with open(__a , "wb" ) as fi:
                _a = self.sp_model.serialized_model_proto()
                fi.write(__a )
        return (out_vocab_file,)

    # Presumably `get_special_tokens_mask` — TODO confirm.
    def UpperCamelCase__ ( self : Optional[int] , __a : List[int] , __a : Optional[List[int]] = None , __a : bool = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
        _a = [1] * len(self.prefix_tokens )
        _a = [1] * len(self.suffix_tokens )
        if token_ids_a is None:
            return prefix_ones + ([0] * len(__a )) + suffix_ones
        return prefix_ones + ([0] * len(__a )) + ([0] * len(__a )) + suffix_ones

    # Presumably `build_inputs_with_special_tokens` — TODO confirm.
    def UpperCamelCase__ ( self : Dict , __a : List[int] , __a : Optional[List[int]] = None ):
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens

    # Presumably `_build_translation_inputs` — TODO confirm.
    def UpperCamelCase__ ( self : str , __a : str , __a : str , __a : Optional[str] , __a : Optional[str] , **__a : Tuple ):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
        _a = src_lang
        _a = self(__a , add_special_tokens=__a , return_tensors=__a , **__a )
        _a = self.convert_tokens_to_ids(__a )
        _a = tgt_lang_id
        return inputs

    # Presumably `prepare_seq2seq_batch` — TODO confirm.
    def UpperCamelCase__ ( self : List[str] , __a : List[str] , __a : str = "en_XX" , __a : Optional[List[str]] = None , __a : str = "ro_RO" , **__a : int , ):
        _a = src_lang
        _a = tgt_lang
        return super().prepare_seqaseq_batch(__a , __a , **__a )

    # Presumably `_switch_to_input_mode` — TODO confirm.
    def UpperCamelCase__ ( self : str ):
        return self.set_src_lang_special_tokens(self.src_lang )

    # Presumably `_switch_to_target_mode` — TODO confirm.
    def UpperCamelCase__ ( self : int ):
        return self.set_tgt_lang_special_tokens(self.tgt_lang )

    # Presumably `set_src_lang_special_tokens`: prefix = [code], suffix = [eos].
    def UpperCamelCase__ ( self : List[Any] , __a : str ):
        _a = self.lang_code_to_id[src_lang]
        _a = [self.cur_lang_code_id]
        _a = [self.eos_token_id]

    # Presumably `set_tgt_lang_special_tokens`: prefix = [code], suffix = [eos].
    def UpperCamelCase__ ( self : Optional[Any] , __a : str ):
        _a = self.lang_code_to_id[tgt_lang]
        _a = [self.cur_lang_code_id]
        _a = [self.eos_token_id]
| 63 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

# NOTE(review): the obfuscated text assigned all four constants below to the
# single name `__snake_case`, clobbering each other. Canonical names restored
# per the transformers Whisper configuration module convention — confirm.
WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}

# Token ids suppressed during generation (non-speech tokens).
# fmt: off
NON_SPEECH_TOKENS = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
    705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
    1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
    4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
    11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
    17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
    34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
NON_SPEECH_TOKENS_MULTI = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
    893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
    3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
    7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
    14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
    22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
    42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class A__(PretrainedConfig):
    """Whisper model configuration (class name kept from the obfuscated file).

    NOTE(review): restored from obfuscated text — the base was the undefined
    name `__SCREAMING_SNAKE_CASE` (`PretrainedConfig` is imported above), the
    `__init__` signature repeated one parameter name (a SyntaxError), and the
    body assigned every attribute to one local. Parameter and attribute names
    were recovered from the right-hand sides of the original assignments,
    which match the canonical WhisperConfig.
    """

    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=5_1865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=5_0257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=5_0256,
        bos_token_id=5_0256,
        eos_token_id=5_0256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 5_0256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        """Store model hyper-parameters and forward the generation/special
        token settings to `PretrainedConfig.__init__`."""
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class A__(OnnxSeqaSeqConfigWithPast):
    """ONNX export configuration for Whisper (encoder-decoder, optional
    cached past key/values).

    NOTE(review): restored from obfuscated text — the base was the undefined
    name `__SCREAMING_SNAKE_CASE` (`OnnxSeqaSeqConfigWithPast` is imported
    above) and all three members shared one name, so `inputs` and
    `generate_dummy_inputs` were clobbered. Names restored to the members the
    bodies override/extend (`super().generate_dummy_inputs`,
    `self.fill_with_past_key_values_`, `self.use_past`).
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis layout of the exported model's inputs."""
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            # With cached past key/values the decoder consumes one new token.
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework=None,
        sampling_rate: int = 2_2050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        """Build dummy audio features (encoder) and token ids (decoder) for
        the export trace."""
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        # With past key/values the decoder sequence is half the encoder's.
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )
        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")
        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")
        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance when validating exported vs. eager outputs.

        NOTE(review): the original line carried fused extraction residue
        (`return 1e-3 | 269 | 0 |`), which would raise TypeError; removed.
        """
        return 1e-3
"""Exports for the Stable Diffusion ControlNet pipeline family, guarded by
backend availability checks."""
from ...utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_torch_available,
    is_transformers_available,
)


try:
    # The PyTorch pipelines need both `transformers` and `torch`.
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Fall back to dummy placeholder objects that raise on use.
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline


if is_transformers_available() and is_flax_available():
    # The Flax pipeline is only exported when JAX/Flax is installed.
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 64 |
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
# NOTE(review): the obfuscated text bound all four constants to `__snake_case`
# (including an illegal annotated tuple-unpack target — a SyntaxError) while
# the benchmark code below reads SPEED_TEST_N_EXAMPLES / SMALL_TEST /
# RESULTS_BASEPATH / RESULTS_FILENAME; names restored from those references.
SPEED_TEST_N_EXAMPLES = 50_000  # example count for the full-speed runs
SMALL_TEST = 5_000  # example count for the formatted-read runs

# Results are written next to this script: ./results/<script-name>.json
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def _lowercase(dataset, length):
    """Time sequential single-item access over the first `length` rows.

    The original obfuscated signature declared two parameters with the same
    name (a SyntaxError); the benchmark driver calls this with keyword
    arguments, so the restored name `length` is load-bearing.
    """
    for i in range(length):
        _ = dataset[i]
@get_duration
def _lowercase(dataset, length, batch_size):
    """Time sliced batch access over the whole dataset.

    `length` is unused here but kept because the benchmark driver passes it
    as a keyword argument for every timing function.
    """
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]
@get_duration
def _lowercase(dataset, length, type):
    """Time single-item access under a given output format (numpy/torch/...).

    `type` shadows the builtin, but the name must match the "type" keyword
    the benchmark driver passes.
    """
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]
@get_duration
def _lowercase(dataset, length, batch_size, type):
    """Time sliced batch access under a given output format.

    Parameter names match the keyword arguments supplied by the benchmark
    driver ("length", "batch_size", "type").
    """
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def _lowercase ( ) -> Union[str, Any]:
    """Generate a synthetic dataset, time the read functions on it, and dump timings to JSON.

    NOTE(review): obfuscation has broken several names in this body — `read`,
    `read_batch`, `read_formatted`, `read_formatted_batch` and the bare
    `__snake_case` references below are not defined in this file as shown, and
    `Union` in the return annotation is not imported. Confirm against the
    original datasets benchmark before running.
    """
    # Timing results keyed by "<func> <kwargs>"; seeded with the run size.
    __lowerCAmelCase : Optional[int] = {"num examples": SPEED_TEST_N_EXAMPLES}
    # (function, kwargs) pairs timed on the freshly generated dataset.
    __lowerCAmelCase : Optional[int] = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    # Smaller subset re-run after shuffling (shuffling slows random access).
    __lowerCAmelCase : Any = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset" )
        __lowerCAmelCase : int = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32" ) ), "numbers": datasets.Value("float32" )} )
        # NOTE(review): the first argument below presumably should be
        # os.path.join(tmp_dir, "dataset.arrow") — `__snake_case` is undefined here.
        __lowerCAmelCase : str = generate_example_dataset(
            os.path.join(__snake_case ,"dataset.arrow" ) ,__snake_case ,num_examples=__snake_case ,seq_shapes={"list": (100,)} ,)
        print("first set of iterations" )
        for func, kwargs in functions:
            print(func.__name__ ,str(__snake_case ) )
            __lowerCAmelCase : str = func(__snake_case ,**__snake_case )
        print("shuffling dataset" )
        __lowerCAmelCase : Optional[int] = dataset.shuffle()
        print("Second set of iterations (after shuffling" )
        for func, kwargs in functions_shuffled:
            print("shuffled " ,func.__name__ ,str(__snake_case ) )
            __lowerCAmelCase : List[str] = func(
                __snake_case ,**__snake_case )
    # Persist all collected timings as JSON next to the script.
    with open(__snake_case ,"wb" ) as f:
        f.write(json.dumps(__snake_case ).encode("utf-8" ) )
if __name__ == "__main__": # useful to run the profiler
    # The benchmark entry point is defined above as `_lowercase`;
    # `benchmark_iterating` does not exist in this file.
    _lowercase()
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
UpperCamelCase__ = logging.get_logger(__name__)
class A ( UpperCAmelCase_ ):
    """Deprecated feature-extractor shim that forwards to the image processor base class.

    Instantiating it emits a deprecation warning and otherwise behaves exactly
    like its base class.
    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        """Warn about the deprecation, then delegate construction to the base class.

        The original obfuscated signature used the same name for *args and
        **kwargs (a SyntaxError) and passed a variable as the warning category;
        FutureWarning is the conventional category for this deprecation pattern.
        """
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 65 |
"""simple docstring"""
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class A__ ( __SCREAMING_SNAKE_CASE ):
    """Builds tiny SqueezeBert configs/inputs and shape-checks each model head.

    NOTE(review): obfuscation gave every parameter in the multi-argument
    methods below the same name `_SCREAMING_SNAKE_CASE`, which is invalid
    Python (duplicate argument). The bodies reference the original names
    (e.g. `parent`, `batch_size`); restore the real signatures from the
    upstream transformers test before running. Typing names used in the
    annotations (Tuple, Dict, ...) are also not imported in this file's
    visible header.
    """
    def __init__( self: Tuple , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: List[str]=13 , _SCREAMING_SNAKE_CASE: Tuple=7 , _SCREAMING_SNAKE_CASE: int=True , _SCREAMING_SNAKE_CASE: Optional[Any]=True , _SCREAMING_SNAKE_CASE: str=False , _SCREAMING_SNAKE_CASE: Optional[Any]=True , _SCREAMING_SNAKE_CASE: int=99 , _SCREAMING_SNAKE_CASE: int=32 , _SCREAMING_SNAKE_CASE: List[str]=5 , _SCREAMING_SNAKE_CASE: Union[str, Any]=4 , _SCREAMING_SNAKE_CASE: int=64 , _SCREAMING_SNAKE_CASE: List[str]="gelu" , _SCREAMING_SNAKE_CASE: str=0.1 , _SCREAMING_SNAKE_CASE: Any=0.1 , _SCREAMING_SNAKE_CASE: Optional[int]=512 , _SCREAMING_SNAKE_CASE: Tuple=16 , _SCREAMING_SNAKE_CASE: Any=2 , _SCREAMING_SNAKE_CASE: List[str]=0.02 , _SCREAMING_SNAKE_CASE: Tuple=3 , _SCREAMING_SNAKE_CASE: Optional[Any]=4 , _SCREAMING_SNAKE_CASE: int=None , _SCREAMING_SNAKE_CASE: int=2 , _SCREAMING_SNAKE_CASE: str=2 , _SCREAMING_SNAKE_CASE: Union[str, Any]=2 , _SCREAMING_SNAKE_CASE: List[Any]=2 , _SCREAMING_SNAKE_CASE: int=4 , _SCREAMING_SNAKE_CASE: List[str]=1 , ) -> Optional[Any]:
        """Record every hyper-parameter used to construct the tiny test models."""
        __lowerCAmelCase : List[str] = parent
        __lowerCAmelCase : Optional[Any] = batch_size
        __lowerCAmelCase : Union[str, Any] = seq_length
        __lowerCAmelCase : Optional[Any] = is_training
        __lowerCAmelCase : Optional[int] = use_input_mask
        __lowerCAmelCase : Dict = use_token_type_ids
        __lowerCAmelCase : Dict = use_labels
        __lowerCAmelCase : Dict = vocab_size
        __lowerCAmelCase : Tuple = hidden_size
        __lowerCAmelCase : List[Any] = num_hidden_layers
        __lowerCAmelCase : Union[str, Any] = num_attention_heads
        __lowerCAmelCase : Tuple = intermediate_size
        __lowerCAmelCase : List[Any] = hidden_act
        __lowerCAmelCase : Optional[Any] = hidden_dropout_prob
        __lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
        __lowerCAmelCase : Optional[int] = max_position_embeddings
        __lowerCAmelCase : Union[str, Any] = type_vocab_size
        __lowerCAmelCase : Optional[int] = type_sequence_label_size
        __lowerCAmelCase : Dict = initializer_range
        __lowerCAmelCase : Tuple = num_labels
        __lowerCAmelCase : Optional[Any] = num_choices
        __lowerCAmelCase : Union[str, Any] = scope
        __lowerCAmelCase : Optional[Any] = q_groups
        __lowerCAmelCase : Optional[int] = k_groups
        __lowerCAmelCase : Any = v_groups
        __lowerCAmelCase : int = post_attention_groups
        __lowerCAmelCase : List[str] = intermediate_groups
        __lowerCAmelCase : Optional[Any] = output_groups
    def _SCREAMING_SNAKE_CASE ( self: Dict) -> List[str]:
        """Build random input ids/masks/labels and a config for one forward pass."""
        __lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        __lowerCAmelCase : Union[str, Any] = None
        if self.use_input_mask:
            __lowerCAmelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length])
        __lowerCAmelCase : Optional[int] = None
        __lowerCAmelCase : List[Any] = None
        __lowerCAmelCase : str = None
        if self.use_labels:
            __lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            __lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            __lowerCAmelCase : str = ids_tensor([self.batch_size] , self.num_choices)
        __lowerCAmelCase : Any = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def _SCREAMING_SNAKE_CASE ( self: Tuple) -> int:
        """Return a SqueezeBertConfig built from the stored hyper-parameters."""
        # NOTE(review): the two dropout keywords below look swapped
        # (hidden <-> attention); this mirrors the upstream test — confirm
        # before "fixing".
        return SqueezeBertConfig(
            embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
    def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Tuple) -> Tuple:
        """Shape-check the bare SqueezeBertModel's last hidden state."""
        __lowerCAmelCase : List[Any] = SqueezeBertModel(config=_SCREAMING_SNAKE_CASE)
        model.to(_SCREAMING_SNAKE_CASE)
        model.eval()
        __lowerCAmelCase : List[Any] = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
        __lowerCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Tuple) -> Dict:
        """Shape-check the masked-LM head's logits."""
        __lowerCAmelCase : int = SqueezeBertForMaskedLM(config=_SCREAMING_SNAKE_CASE)
        model.to(_SCREAMING_SNAKE_CASE)
        model.eval()
        __lowerCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
    def _SCREAMING_SNAKE_CASE ( self: Optional[int] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Union[str, Any]) -> int:
        """Shape-check the QA head's start/end logits."""
        __lowerCAmelCase : str = SqueezeBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE)
        model.to(_SCREAMING_SNAKE_CASE)
        model.eval()
        __lowerCAmelCase : Union[str, Any] = model(
            _SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE)
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
    def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: Tuple) -> Optional[Any]:
        """Shape-check the sequence-classification head's logits."""
        __lowerCAmelCase : List[Any] = self.num_labels
        __lowerCAmelCase : Union[str, Any] = SqueezeBertForSequenceClassification(_SCREAMING_SNAKE_CASE)
        model.to(_SCREAMING_SNAKE_CASE)
        model.eval()
        __lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
    def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[int]) -> Union[str, Any]:
        """Shape-check the token-classification head's logits."""
        __lowerCAmelCase : Dict = self.num_labels
        __lowerCAmelCase : Optional[int] = SqueezeBertForTokenClassification(config=_SCREAMING_SNAKE_CASE)
        model.to(_SCREAMING_SNAKE_CASE)
        model.eval()
        __lowerCAmelCase : List[str] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
    def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: int) -> Tuple:
        """Shape-check the multiple-choice head's logits (inputs tiled per choice)."""
        __lowerCAmelCase : List[str] = self.num_choices
        __lowerCAmelCase : str = SqueezeBertForMultipleChoice(config=_SCREAMING_SNAKE_CASE)
        model.to(_SCREAMING_SNAKE_CASE)
        model.eval()
        __lowerCAmelCase : int = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
        __lowerCAmelCase : Union[str, Any] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
        __lowerCAmelCase : str = model(
            _SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
    def _SCREAMING_SNAKE_CASE ( self: str) -> List[Any]:
        """Adapt prepare_config_and_inputs() output to the common tester's dict format."""
        __lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs()
        ((__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase)) : Union[str, Any] = config_and_inputs
        __lowerCAmelCase : int = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Common-API test suite for all SqueezeBert model heads.

    NOTE(review): obfuscation collapsed several distinct class attributes into
    the repeated name `SCREAMING_SNAKE_CASE` below (all_model_classes,
    pipeline mapping, and three boolean flags) — only the last assignment of
    each duplicated name survives at runtime. The `else None` on the model
    tuple also looks like it should be `else ()`. Confirm against upstream.
    """
    SCREAMING_SNAKE_CASE = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    SCREAMING_SNAKE_CASE = (
        {
            'feature-extraction': SqueezeBertModel,
            'fill-mask': SqueezeBertForMaskedLM,
            'question-answering': SqueezeBertForQuestionAnswering,
            'text-classification': SqueezeBertForSequenceClassification,
            'token-classification': SqueezeBertForTokenClassification,
            'zero-shot': SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    SCREAMING_SNAKE_CASE = False
    SCREAMING_SNAKE_CASE = True
    SCREAMING_SNAKE_CASE = False
    def _SCREAMING_SNAKE_CASE ( self: Dict) -> Optional[Any]:
        """Create the model tester and config tester used by every test below."""
        # NOTE(review): `_SCREAMING_SNAKE_CASE` passed as config_class is
        # undefined here — presumably SqueezeBertConfig in the original.
        __lowerCAmelCase : Any = SqueezeBertModelTester(self)
        __lowerCAmelCase : Optional[int] = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , dim=37)
    def _SCREAMING_SNAKE_CASE ( self: Any) -> Tuple:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()
    def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> Any:
        """Exercise the bare model shape check."""
        __lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*_SCREAMING_SNAKE_CASE)
    def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> List[Any]:
        """Exercise the masked-LM head shape check."""
        __lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*_SCREAMING_SNAKE_CASE)
    def _SCREAMING_SNAKE_CASE ( self: Tuple) -> Union[str, Any]:
        """Exercise the question-answering head shape check."""
        __lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*_SCREAMING_SNAKE_CASE)
    def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> int:
        """Exercise the sequence-classification head shape check."""
        __lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*_SCREAMING_SNAKE_CASE)
    def _SCREAMING_SNAKE_CASE ( self: Any) -> int:
        """Exercise the token-classification head shape check."""
        __lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*_SCREAMING_SNAKE_CASE)
    def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> str:
        """Exercise the multiple-choice head shape check."""
        __lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*_SCREAMING_SNAKE_CASE)
    @slow
    def _SCREAMING_SNAKE_CASE ( self: Any) -> Dict:
        """Smoke-test loading a pretrained checkpoint from the hub."""
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowerCAmelCase : Optional[Any] = SqueezeBertModel.from_pretrained(_SCREAMING_SNAKE_CASE)
            self.assertIsNotNone(_SCREAMING_SNAKE_CASE)
@require_sentencepiece
@require_tokenizers
@require_torch
class A__ ( unittest.TestCase ):
    """Integration test pinning exact logits of the pretrained MNLI checkpoint."""
    @slow
    def _SCREAMING_SNAKE_CASE ( self: int) -> List[Any]:
        """Run one classification forward pass and compare against recorded logits."""
        __lowerCAmelCase : Optional[Any] = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
        __lowerCAmelCase : List[Any] = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]])
        __lowerCAmelCase : List[Any] = model(_SCREAMING_SNAKE_CASE)[0]
        # Three MNLI classes -> logits of shape (1, 3).
        __lowerCAmelCase : Any = torch.Size((1, 3))
        self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE)
        __lowerCAmelCase : Union[str, Any] = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-4))
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class lowerCamelCase :
    """Builds tiny DeBERTa-v2 configs/inputs and shape-checks each TF model head.

    NOTE(review): obfuscation gave every parameter of the multi-argument
    methods below the same name `snake_case`, which is invalid Python
    (duplicate argument); the bodies reference the original names
    (`parent`, `batch_size`, ...). Restore the real signatures from the
    upstream transformers test before running.
    """
    def __init__(self: Optional[Any] , snake_case: Tuple , snake_case: Union[str, Any]=13 , snake_case: Optional[int]=7 , snake_case: Union[str, Any]=True , snake_case: Any=True , snake_case: str=True , snake_case: Dict=True , snake_case: int=99 , snake_case: Optional[Any]=32 , snake_case: Optional[Any]=2 , snake_case: Tuple=4 , snake_case: int=37 , snake_case: Any="gelu" , snake_case: List[str]=0.1 , snake_case: List[Any]=0.1 , snake_case: Optional[int]=512 , snake_case: int=16 , snake_case: str=2 , snake_case: int=0.0_2 , snake_case: str=False , snake_case: Any=True , snake_case: List[Any]="None" , snake_case: Optional[Any]=3 , snake_case: Optional[Any]=4 , snake_case: List[str]=None , ) -> str:
        """Record every hyper-parameter used to construct the tiny test models."""
        snake_case_ :Optional[Any] = parent
        snake_case_ :str = batch_size
        snake_case_ :Tuple = seq_length
        snake_case_ :Any = is_training
        snake_case_ :Dict = use_input_mask
        snake_case_ :str = use_token_type_ids
        snake_case_ :List[str] = use_labels
        snake_case_ :Optional[int] = vocab_size
        snake_case_ :Dict = hidden_size
        snake_case_ :List[Any] = num_hidden_layers
        snake_case_ :Optional[Any] = num_attention_heads
        snake_case_ :int = intermediate_size
        snake_case_ :Optional[Any] = hidden_act
        snake_case_ :Dict = hidden_dropout_prob
        snake_case_ :str = attention_probs_dropout_prob
        snake_case_ :int = max_position_embeddings
        snake_case_ :int = type_vocab_size
        snake_case_ :Tuple = type_sequence_label_size
        snake_case_ :Dict = initializer_range
        snake_case_ :List[str] = num_labels
        snake_case_ :Union[str, Any] = num_choices
        snake_case_ :str = relative_attention
        snake_case_ :Any = position_biased_input
        snake_case_ :str = pos_att_type
        snake_case_ :int = scope
    def lowerCAmelCase_ ( self: List[str] ) -> Any:
        """Build random ids/masks/labels plus a DebertaVaConfig for one forward pass."""
        snake_case_ :str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        snake_case_ :List[Any] = None
        if self.use_input_mask:
            snake_case_ :Tuple = random_attention_mask([self.batch_size, self.seq_length] )
        snake_case_ :List[str] = None
        if self.use_token_type_ids:
            snake_case_ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        snake_case_ :Union[str, Any] = None
        snake_case_ :Dict = None
        snake_case_ :List[str] = None
        if self.use_labels:
            snake_case_ :Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            snake_case_ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        snake_case_ :int = DebertaVaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=snake_case , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def lowerCAmelCase_ ( self: Optional[int] , snake_case: List[str] , snake_case: Optional[int] , snake_case: str , snake_case: Any , snake_case: List[str] , snake_case: List[str] , snake_case: str ) -> Any:
        """Shape-check the bare TF model's last hidden state (dict and list inputs)."""
        snake_case_ :Any = TFDebertaVaModel(config=snake_case )
        snake_case_ :List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        snake_case_ :int = [input_ids, input_mask]
        snake_case_ :Dict = model(snake_case )
        snake_case_ :List[Any] = model(snake_case )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def lowerCAmelCase_ ( self: Optional[Any] , snake_case: Optional[Any] , snake_case: List[Any] , snake_case: str , snake_case: int , snake_case: List[Any] , snake_case: int , snake_case: int ) -> Optional[Any]:
        """Shape-check the masked-LM head's logits."""
        snake_case_ :str = TFDebertaVaForMaskedLM(config=snake_case )
        snake_case_ :Optional[Any] = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        snake_case_ :Optional[Any] = model(snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def lowerCAmelCase_ ( self: int , snake_case: str , snake_case: List[Any] , snake_case: int , snake_case: Union[str, Any] , snake_case: Tuple , snake_case: int , snake_case: str ) -> str:
        """Shape-check the sequence-classification head's logits."""
        snake_case_ :Tuple = self.num_labels
        snake_case_ :Tuple = TFDebertaVaForSequenceClassification(config=snake_case )
        snake_case_ :List[str] = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        snake_case_ :int = model(snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def lowerCAmelCase_ ( self: List[Any] , snake_case: Union[str, Any] , snake_case: int , snake_case: Tuple , snake_case: int , snake_case: Optional[int] , snake_case: str , snake_case: List[Any] ) -> List[str]:
        """Shape-check the token-classification head's logits."""
        snake_case_ :Optional[Any] = self.num_labels
        snake_case_ :Any = TFDebertaVaForTokenClassification(config=snake_case )
        snake_case_ :Union[str, Any] = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        snake_case_ :int = model(snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def lowerCAmelCase_ ( self: List[str] , snake_case: Optional[int] , snake_case: str , snake_case: List[Any] , snake_case: Any , snake_case: Dict , snake_case: Union[str, Any] , snake_case: Optional[int] ) -> str:
        """Shape-check the QA head's start/end logits."""
        snake_case_ :Dict = TFDebertaVaForQuestionAnswering(config=snake_case )
        snake_case_ :Any = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        snake_case_ :Tuple = model(snake_case )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def lowerCAmelCase_ ( self: Union[str, Any] ) -> Any:
        """Adapt prepare_config_and_inputs() output to the common tester's dict format."""
        snake_case_ :Optional[int] = self.prepare_config_and_inputs()
        (
            (
                snake_case_
            ), (
                snake_case_
            ), (
                snake_case_
            ), (
                snake_case_
            ), (
                snake_case_
            ), (
                snake_case_
            ), (
                snake_case_
            ),
        ) :Tuple = config_and_inputs
        snake_case_ :List[str] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_tf
class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
    """Common-API test suite for all TF DeBERTa-v2 model heads.

    NOTE(review): the two `_A : Any = False` flags near the bottom were
    collapsed to the same name by obfuscation — only the last survives.
    """
    _A : Dict = (
        (
            TFDebertaVaModel,
            TFDebertaVaForMaskedLM,
            TFDebertaVaForQuestionAnswering,
            TFDebertaVaForSequenceClassification,
            TFDebertaVaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    _A : Any = (
        {
            """feature-extraction""": TFDebertaVaModel,
            """fill-mask""": TFDebertaVaForMaskedLM,
            """question-answering""": TFDebertaVaForQuestionAnswering,
            """text-classification""": TFDebertaVaForSequenceClassification,
            """token-classification""": TFDebertaVaForTokenClassification,
            """zero-shot""": TFDebertaVaForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    _A : Any = False
    _A : List[Any] = False
    def lowerCAmelCase_ ( self: Dict ) -> List[str]:
        """Create the model tester and config tester used by the tests below."""
        snake_case_ :Optional[Any] = TFDebertaVaModelTester(self )
        snake_case_ :List[Any] = ConfigTester(self , config_class=snake_case , hidden_size=37 )
    def lowerCAmelCase_ ( self: Union[str, Any] ) -> Any:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()
    def lowerCAmelCase_ ( self: Optional[Any] ) -> str:
        """Exercise the bare model shape check."""
        snake_case_ :Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case )
    def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[Any]:
        """Exercise the masked-LM head shape check."""
        snake_case_ :Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*snake_case )
    def lowerCAmelCase_ ( self: str ) -> List[Any]:
        """Exercise the question-answering head shape check."""
        snake_case_ :Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*snake_case )
    def lowerCAmelCase_ ( self: List[str] ) -> Optional[int]:
        """Exercise the sequence-classification head shape check."""
        snake_case_ :List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*snake_case )
    def lowerCAmelCase_ ( self: Any ) -> Optional[Any]:
        """Exercise the token-classification head shape check."""
        snake_case_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*snake_case )
    @slow
    def lowerCAmelCase_ ( self: Tuple ) -> List[Any]:
        """Smoke-test loading a pretrained checkpoint from the hub."""
        snake_case_ :List[Any] = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
        self.assertIsNotNone(snake_case )
@require_tf
class lowerCamelCase ( unittest.TestCase ):
    """Integration tests pinning exact hidden states of a pretrained TF checkpoint."""
    @unittest.skip(reason="""Model not available yet""" )
    def lowerCAmelCase_ ( self: Tuple ) -> int:
        """Placeholder kept until the referenced model is published."""
        pass
    @slow
    def lowerCAmelCase_ ( self: List[Any] ) -> int:
        """Run one forward pass and compare a slice of hidden states to recorded values."""
        snake_case_ :List[Any] = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
        snake_case_ :Optional[int] = tf.constant([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
        snake_case_ :int = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        snake_case_ :Optional[int] = model(snake_case , attention_mask=snake_case )[0]
        snake_case_ :Tuple = tf.constant(
            [[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]] )
        tf.debugging.assert_near(output[:, 1:4, 1:4] , snake_case , atol=1E-4 )
| 66 |
"""simple docstring"""
import os
from math import logaa
def _lowercase ( __snake_case = "base_exp.txt" ) -> int:
    """Return the 1-indexed line of `__snake_case` whose `base,exponent` pair is largest.

    Each line holds "base,exponent"; values are compared via exponent*log10(base)
    so huge powers never need to be materialised (Project Euler 99).

    The original body referenced undefined names (`largest`, `result`, `logaa`)
    and leaked the file handle; both are fixed here.
    """
    from math import log10  # local import: the module-level `logaa` import is broken

    largest = 0.0
    result = 0
    path = os.path.join(os.path.dirname(__snake_case), __snake_case)
    with open(path) as lines:  # context manager so the handle is always closed
        for i, line in enumerate(lines):
            base, exponent = map(int, line.split(","))
            magnitude = exponent * log10(base)
            if magnitude > largest:
                largest = magnitude
                result = i + 1  # problem statement uses 1-based line numbers
    return result
if __name__ == "__main__":
    # The solver above is named `_lowercase`; `solution` does not exist in this file.
    print(_lowercase())
'''simple docstring'''
def __lowerCAmelCase ( UpperCamelCase__ ) -> int:
    """Return the sum of the proper divisors of `UpperCamelCase__` (excluding itself).

    Raises:
        ValueError: if the input is not an integer or is not positive.
    """
    # Original code called isinstance(x, x) — the second argument must be a
    # type; the intended check is against `int`.
    if not isinstance(UpperCamelCase__ , int ):
        raise ValueError('''Input must be an integer''' )
    if UpperCamelCase__ <= 0:
        raise ValueError('''Input must be positive''' )
    # Proper divisors cannot exceed n // 2, so only scan that far.
    return sum(
        divisor for divisor in range(1 , UpperCamelCase__ // 2 + 1 ) if UpperCamelCase__ % divisor == 0 )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 67 |
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def _lowercase ( __snake_case ):
    """Return `__snake_case` unchanged if it is iterable, else duplicate it into a 2-tuple.

    Used to normalise image sizes so a scalar `s` becomes `(s, s)`.
    The original body returned an undefined name `x`, and its `-> List[str]`
    annotation referenced an unimported typing name; both are fixed here.
    """
    if isinstance(__snake_case ,collections.abc.Iterable ):
        return __snake_case
    return (__snake_case, __snake_case)
@require_flax
class A__ :
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Optional[Any]) -> str:
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ( self: str) -> int:
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> Tuple:
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ( self: Any , _SCREAMING_SNAKE_CASE: np.ndarray , _SCREAMING_SNAKE_CASE: np.ndarray , _SCREAMING_SNAKE_CASE: float) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : str = np.abs((a - b)).max()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , F"""Difference between torch and flax is {diff} (>= {tol}).""")
def _SCREAMING_SNAKE_CASE ( self: str , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Any=None , **_SCREAMING_SNAKE_CASE: Tuple) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = FlaxVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE)
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim))
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim))
def _SCREAMING_SNAKE_CASE ( self: List[Any] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Optional[Any]=None , **_SCREAMING_SNAKE_CASE: List[str]) -> Dict:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = {"vision_model": vision_model, "text_model": text_model}
__lowerCAmelCase : Optional[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE)
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim))
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim))
def _SCREAMING_SNAKE_CASE ( self: Dict , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: List[Any]=None , **_SCREAMING_SNAKE_CASE: Tuple) -> str:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase : Optional[Any] = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = {"vision_model": vision_model, "text_model": text_model}
__lowerCAmelCase : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = after_output[0]
__lowerCAmelCase : Any = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1e-3)
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Any=None , **_SCREAMING_SNAKE_CASE: List[str]) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase : Any = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = {"vision_model": vision_model, "text_model": text_model}
__lowerCAmelCase : Optional[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = model(
input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = output.vision_model_output.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE) , vision_config.num_hidden_layers)
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowerCAmelCase : List[str] = to_atuple(vision_model.config.image_size)
__lowerCAmelCase : Any = to_atuple(vision_model.config.patch_size)
__lowerCAmelCase : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowerCAmelCase : Tuple = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len))
__lowerCAmelCase : Union[str, Any] = output.text_model_output.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE) , text_config.num_hidden_layers)
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _SCREAMING_SNAKE_CASE ( self: List[str] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: int) -> str:
"""simple docstring"""
pt_model.to(_SCREAMING_SNAKE_CASE)
pt_model.eval()
# prepare inputs
__lowerCAmelCase : Union[str, Any] = inputs_dict
__lowerCAmelCase : Union[str, Any] = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}
with torch.no_grad():
__lowerCAmelCase : Any = pt_model(**_SCREAMING_SNAKE_CASE).to_tuple()
__lowerCAmelCase : List[Any] = fx_model(**_SCREAMING_SNAKE_CASE).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE) , len(_SCREAMING_SNAKE_CASE) , "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4]):
self.assert_almost_equals(_SCREAMING_SNAKE_CASE , pt_output.numpy() , 4e-2)
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = FlaxVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = fx_model_loaded(**_SCREAMING_SNAKE_CASE).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE) , len(_SCREAMING_SNAKE_CASE) , "Output lengths differ between Flax and PyTorch")
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4]):
self.assert_almost_equals(_SCREAMING_SNAKE_CASE , pt_output.numpy() , 4e-2)
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = VisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_flax=_SCREAMING_SNAKE_CASE)
pt_model_loaded.to(_SCREAMING_SNAKE_CASE)
pt_model_loaded.eval()
with torch.no_grad():
__lowerCAmelCase : Optional[Any] = pt_model_loaded(**_SCREAMING_SNAKE_CASE).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE) , len(_SCREAMING_SNAKE_CASE) , "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4]):
self.assert_almost_equals(_SCREAMING_SNAKE_CASE , pt_output_loaded.numpy() , 4e-2)
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Dict) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = VisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = FlaxVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = fx_state
self.check_pt_flax_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: List[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Dict) -> str:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = VisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = FlaxVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = load_flax_weights_in_pytorch_model(_SCREAMING_SNAKE_CASE , fx_model.params)
self.check_pt_flax_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> str:
"""simple docstring"""
__lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Dict) -> int:
"""simple docstring"""
__lowerCAmelCase : List[Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Dict = self.prepare_config_and_inputs()
self.check_save_load(**_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: int) -> Dict:
"""simple docstring"""
__lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_SCREAMING_SNAKE_CASE)
@is_pt_flax_cross_test
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Any:
"""simple docstring"""
__lowerCAmelCase : Dict = self.prepare_config_and_inputs()
__lowerCAmelCase : List[Any] = config_inputs_dict.pop("vision_config")
__lowerCAmelCase : str = config_inputs_dict.pop("text_config")
__lowerCAmelCase : Union[str, Any] = config_inputs_dict
self.check_equivalence_pt_to_flax(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
self.check_equivalence_flax_to_pt(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
@slow
def _SCREAMING_SNAKE_CASE ( self: str) -> Dict:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase : Dict = self.get_pretrained_model_and_inputs()
__lowerCAmelCase : Union[str, Any] = model_a(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = FlaxVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = model_a(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[str] = after_outputs[0]
__lowerCAmelCase : List[Any] = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1e-5)
@require_flax
class FlaxViTBertModelTest(__SCREAMING_SNAKE_CASE, unittest.TestCase):
    """Dual-encoder tests pairing a tiny Flax ViT vision tower with a Flax BERT text tower.

    Renamed from the duplicated placeholder `A__` (three classes shared that
    name, so only the last one was ever collected by unittest).
    """

    def get_pretrained_model_and_inputs(self):
        """Build a tiny pretrained ViT+BERT dual encoder and a matching input batch."""
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        """Instantiate the two towers from their configs."""
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        """Assemble configs and model inputs from the per-model testers."""
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class FlaxCLIPVisionBertModelTest(__SCREAMING_SNAKE_CASE, unittest.TestCase):
    """Dual-encoder tests pairing a tiny Flax CLIP vision tower with a Flax BERT text tower.

    Renamed from the duplicated placeholder `A__`.
    """

    def get_pretrained_model_and_inputs(self):
        """Build a tiny pretrained CLIP+BERT dual encoder and a matching input batch."""
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        """Instantiate the two towers from their configs."""
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        """Assemble configs and model inputs from the per-model testers."""
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    """Slow integration test against the public clip-italian checkpoint.

    Renamed from the duplicated placeholder `A__`.
    """

    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
lowerCAmelCase__ = float("""nan""")
class Tee:
    """Duplicate everything written to stdout into a log file.

    Usage: ``sys.stdout = Tee(filename)``.

    Renamed from the placeholder `a__` (main() instantiates it as `Tee`), and
    the write method restored to its real name `write` — with the obfuscated
    name, ``print`` resolved ``write`` via ``__getattr__`` to the real stdout
    and nothing was ever logged to the file.
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        # delegate everything else (flush, isatty, ...) to the real stdout
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm control codes so the log file stays readable
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    """Return the original command line, re-quoted and wrapped to `max_width` columns.

    Includes critical env vars (CUDA_VISIBLE_DEVICES) and the python executable,
    so the printed command can be replayed as-is.
    """
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    """Normalize `args.base_cmd` and return it as an argv list prefixed with the python executable.

    Unwraps multi-line input, replaces any --output_dir with our own and
    guarantees --overwrite_output_dir is present.
    """
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    """Run `cmd` once, tee its streams to log files, and return the wanted metrics.

    Returns ``{target_metric_key: nan}`` when the run fails; otherwise the
    subset of ``output_dir/all_results.json`` restricted to `metric_keys`.
    """
    # Enable this branch to debug everything but the run itself (fast fake metrics).
    if 0:  # noqa: SIM108
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    """Run one variation `repeat_times` times and return its averaged metrics.

    Prints a one-line progress summary (✓/✘ per run plus the mean target metric).
    Returns a dict of mean metrics tagged with the variation, or a nan entry
    when every run failed.
    """
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(metrics) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    """Return a multi-line report of software versions and GPU hardware (requires CUDA)."""
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory / 2**30:0.2f}GB
"""
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    """Tabulate all variation results and print github-flavored and console reports.

    The diff_% column is relative to `base_variation` when given (and valid),
    otherwise to the minimal target-metric value.
    """
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    """Parse CLI args, run every variation of the base command, and print the report."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base-cmd",
        default=None,
        type=str,
        required=True,
        help="Base cmd",
    )
    parser.add_argument(
        "--variations",
        default=None,
        type=str,
        nargs="+",
        required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation",
        default=None,
        type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key",
        default=None,
        type=str,
        required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys",
        default="",
        type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples",
    )
    parser.add_argument(
        "--repeat-times",
        default=1,
        type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir",
        default="output_benchmark",
        type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose",
        default=False,
        action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)


if __name__ == "__main__":
    main()
| 68 |
"""simple docstring"""
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the two sorted runs ``input_list[low:mid]`` and ``input_list[mid:high+1]`` in place.

    Returns `input_list` with the merged run written back into positions low..high.
    """
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    # write the merged run back into the list; the original assigned this to a
    # throwaway local, which silently made every merge a no-op
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Return a sorted copy of `input_list` using bottom-up (iterative) merge sort."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list
if __name__ == "__main__":
    # read a comma-separated list of integers from stdin and print it sorted
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
"""simple docstring"""
from typing import Any
class Node:
    """A singly linked list node holding `data` and a pointer to the next node.

    Renamed from the duplicated placeholder `UpperCamelCase` — the list class
    instantiates it as `Node`.
    """

    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None  # set when the node is linked into a list
class LinkedList:
    """A minimal singly linked list supporting push-to-front and data swapping.

    Renamed from the duplicated placeholder `UpperCamelCase`; `push` is fixed
    to actually link the new node (the original bound every step to one
    throwaway local, so the head was never updated).
    """

    def __init__(self) -> None:
        self.head = None

    def print_list(self) -> None:
        """Print the node data from head to tail on one line."""
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any) -> None:
        """Insert a new node holding `new_data` at the front of the list."""
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1, node_data_2) -> None:
        """Swap the `data` payloads of the first nodes holding the two given values.

        No-op when the two values are equal or either value is not found.
        """
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next

        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next

        if node_1 is None or node_2 is None:
            return

        node_1.data, node_2.data = node_2.data, node_1.data
if __name__ == "__main__":
    # demo: build 1..5, print, swap 1 and 4, print again
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
| 69 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxMTaIntegrationTest(unittest.TestCase):
    """Slow integration test checking the Flax MT5 loss against a fixed reference score.

    Renamed from the placeholder `A__`; locals restored so `model`/`tokenizer`
    are actually bound before use.
    """

    @slow
    def test_small_integration_test(self):
        model = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        # per-token NLL scaled by sequence length, to match the reference score
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
# Module-level logger for this configuration module.
A__ : Tuple =logging.get_logger(__name__)

# Canonical pretrained-config download locations.
A__ : List[Any] ={
    '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/config.json''',
    # See all BART models at https://huggingface.co/models?filter=bart
}
class UpperCAmelCase ( PretrainedConfig ):
    """Configuration class for BART models.

    Stores the encoder/decoder sizes, dropout rates and special-token ids used
    to instantiate a BART model.

    NOTE(review): the original was not valid Python — every ``__init__``
    parameter was named ``__snake_case`` (duplicate-argument SyntaxError), the
    base class name was the undefined ``snake_case_``, and the ``self.``
    attribute assignments were collapsed onto a throwaway local.  Parameter and
    attribute names are reconstructed from the right-hand sides that survived
    the mangling; defaults are kept exactly as written.
    """

    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50_265,
        max_position_embeddings=10_24,
        encoder_layers=12,
        encoder_ffn_dim=40_96,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=40_96,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=10_24,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        num_labels=3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            num_labels=num_labels,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                """The config can simply be saved and uploaded again to be fixed.""" )
class UpperCAmelCase ( snake_case_ ):
    """ONNX export configuration for BART (seq2seq / causal-lm / QA tasks).

    Describes dynamic input/output axes and builds dummy inputs (including
    past-key-value tensors) for tracing.

    NOTE(review): this block is mangled and not runnable as written — every
    local is assigned to the throwaway name ``_lowerCAmelCase`` while later
    lines read the intended names (``common_inputs``, ``batch``, ``seqlen``,
    ...), method signatures repeat the parameter name ``__snake_case``
    (a duplicate-argument SyntaxError), and the base class ``snake_case_`` is
    undefined.  Left byte-identical; only annotations are added as comments.
    """

    @property
    def lowercase__ ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
        """Map each model input name to its dynamic-axis labels, per task."""
        if self.task in ["default", "seq2seq-lm"]:
            # Encoder inputs always carry dynamic batch + encoder_sequence axes.
            _lowerCAmelCase = OrderedDict(
                [
                    ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
                    ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
                ] )

            if self.use_past:
                # With cache, the decoder consumes a single new token per step.
                _lowerCAmelCase = {0: """batch"""}
                _lowerCAmelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
            else:
                _lowerCAmelCase = {0: """batch""", 1: """decoder_sequence"""}
                _lowerCAmelCase = {0: """batch""", 1: """decoder_sequence"""}

            if self.use_past:
                self.fill_with_past_key_values_(__snake_case , direction="""inputs""" )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            _lowerCAmelCase = OrderedDict(
                [
                    ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
                    ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
                ] )
            if self.use_past:
                _lowerCAmelCase , _lowerCAmelCase = self.num_layers
                for i in range(__snake_case ):
                    _lowerCAmelCase = {0: """batch""", 2: """past_sequence + sequence"""}
                    _lowerCAmelCase = {0: """batch""", 2: """past_sequence + sequence"""}
        else:
            _lowerCAmelCase = OrderedDict(
                [
                    ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
                    ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
                    ("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
                    ("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
                ] )

        return common_inputs

    @property
    def lowercase__ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
        """Map each model output name to its dynamic-axis labels."""
        if self.task in ["default", "seq2seq-lm"]:
            _lowerCAmelCase = super().outputs
        else:
            _lowerCAmelCase = super(__snake_case , self ).outputs
            if self.use_past:
                _lowerCAmelCase , _lowerCAmelCase = self.num_layers
                for i in range(__snake_case ):
                    _lowerCAmelCase = {0: """batch""", 2: """past_sequence + sequence"""}
                    _lowerCAmelCase = {0: """batch""", 2: """past_sequence + sequence"""}
        return common_outputs

    def lowercase__ ( self : Optional[int] , __snake_case : PreTrainedTokenizer , __snake_case : int = -1 , __snake_case : int = -1 , __snake_case : bool = False , __snake_case : Optional[TensorType] = None , ) -> Mapping[str, Any]:
        """Build dummy encoder+decoder inputs (and past key values) for tracing."""
        _lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            __snake_case , __snake_case , __snake_case , __snake_case , __snake_case )

        # Generate decoder inputs
        _lowerCAmelCase = seq_length if not self.use_past else 1
        _lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            __snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
        _lowerCAmelCase = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        _lowerCAmelCase = dict(**__snake_case , **__snake_case )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
            else:
                import torch
            _lowerCAmelCase , _lowerCAmelCase = common_inputs["""input_ids"""].shape
            _lowerCAmelCase = common_inputs["""decoder_input_ids"""].shape[1]
            _lowerCAmelCase , _lowerCAmelCase = self.num_attention_heads
            # Shape of one encoder past tensor: (batch, heads, enc_len, head_dim).
            _lowerCAmelCase = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            _lowerCAmelCase = decoder_seq_length + 3
            _lowerCAmelCase = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            # Extend the decoder mask to also cover the fabricated past length.
            _lowerCAmelCase = torch.cat(
                [common_inputs["""decoder_attention_mask"""], torch.ones(__snake_case , __snake_case )] , dim=1 )

            _lowerCAmelCase = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            _lowerCAmelCase , _lowerCAmelCase = self.num_layers
            _lowerCAmelCase = min(__snake_case , __snake_case )
            _lowerCAmelCase = max(__snake_case , __snake_case ) - min_num_layers
            _lowerCAmelCase = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""

            for _ in range(__snake_case ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(__snake_case ),
                        torch.zeros(__snake_case ),
                        torch.zeros(__snake_case ),
                        torch.zeros(__snake_case ),
                    ) )
            # TODO: test this.
            _lowerCAmelCase = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
            for _ in range(__snake_case , __snake_case ):
                common_inputs["past_key_values"].append((torch.zeros(__snake_case ), torch.zeros(__snake_case )) )
        return common_inputs

    def lowercase__ ( self : Tuple , __snake_case : PreTrainedTokenizer , __snake_case : int = -1 , __snake_case : int = -1 , __snake_case : bool = False , __snake_case : Optional[TensorType] = None , ) -> Mapping[str, Any]:
        """Build dummy causal-LM inputs, optionally with past key values."""
        _lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            __snake_case , __snake_case , __snake_case , __snake_case , __snake_case )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
            else:
                import torch
            _lowerCAmelCase , _lowerCAmelCase = common_inputs["""input_ids"""].shape
            # Not using the same length for past_key_values
            _lowerCAmelCase = seqlen + 2
            _lowerCAmelCase , _lowerCAmelCase = self.num_layers
            _lowerCAmelCase , _lowerCAmelCase = self.num_attention_heads
            _lowerCAmelCase = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            _lowerCAmelCase = common_inputs["""attention_mask"""].dtype
            _lowerCAmelCase = torch.cat(
                [common_inputs["""attention_mask"""], torch.ones(__snake_case , __snake_case , dtype=__snake_case )] , dim=1 )
            _lowerCAmelCase = [
                (torch.zeros(__snake_case ), torch.zeros(__snake_case )) for _ in range(__snake_case )
            ]
        return common_inputs

    def lowercase__ ( self : Any , __snake_case : PreTrainedTokenizer , __snake_case : int = -1 , __snake_case : int = -1 , __snake_case : bool = False , __snake_case : Optional[TensorType] = None , ) -> Mapping[str, Any]:
        """Build plain (no-past) dummy inputs by tokenizing repeated unk tokens."""
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        _lowerCAmelCase = compute_effective_axis_dimension(
            __snake_case , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        _lowerCAmelCase = tokenizer.num_special_tokens_to_add(__snake_case )
        _lowerCAmelCase = compute_effective_axis_dimension(
            __snake_case , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__snake_case )

        # Generate dummy inputs according to compute batch and sequence
        _lowerCAmelCase = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
        _lowerCAmelCase = dict(tokenizer(__snake_case , return_tensors=__snake_case ) )
        return common_inputs

    def lowercase__ ( self : Dict , __snake_case : PreTrainedTokenizer , __snake_case : int = -1 , __snake_case : int = -1 , __snake_case : bool = False , __snake_case : Optional[TensorType] = None , ) -> Mapping[str, Any]:
        """Dispatch dummy-input generation to the task-specific helper."""
        if self.task in ["default", "seq2seq-lm"]:
            _lowerCAmelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                __snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case )

        elif self.task == "causal-lm":
            _lowerCAmelCase = self._generate_dummy_inputs_for_causal_lm(
                __snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case )
        else:
            _lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                __snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case )

        return common_inputs

    def lowercase__ ( self : Union[str, Any] , __snake_case : List[Any] , __snake_case : str , __snake_case : Optional[Any] , __snake_case : int ) -> List[Any]:
        """Flatten nested past_key_values using the scheme matching the task."""
        if self.task in ["default", "seq2seq-lm"]:
            _lowerCAmelCase = super()._flatten_past_key_values_(__snake_case , __snake_case , __snake_case , __snake_case )
        else:
            _lowerCAmelCase = super(__snake_case , self )._flatten_past_key_values_(
                __snake_case , __snake_case , __snake_case , __snake_case )
| 70 |
"""simple docstring"""
import re
def _lowercase ( dna: str ) -> str:
    """Return the complement of a DNA strand (A<->T, C<->G).

    Args:
        dna: strand consisting only of the characters A, T, C, G.

    Raises:
        ValueError: if the strand contains any other character.

    NOTE(review): the original parameter was mangled; the body read the
    unbound name ``dna``, which is restored here.
    """
    # Every character must match [ATCG]; otherwise the strand is invalid.
    if len(re.findall("[ATCG]" , dna ) ) != len(dna ):
        raise ValueError("Invalid Strand" )

    return dna.translate(dna.maketrans("ATCG" , "TAGC" ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 269 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
# Force deterministic torch/cudnn kernels so generated images are reproducible.
enable_full_determinism()
class __A ( a , unittest.TestCase ):
    """Fast unit tests for DiTPipeline with a tiny randomly-initialized model.

    NOTE(review): this block is mangled and not runnable as written — locals
    are bound to the throwaway name ``__UpperCamelCase`` while later lines read
    ``transformer`` / ``vae`` / ``pipe`` / ``image`` etc., and
    ``get_dummy_inputs`` repeats the parameter name ``lowerCamelCase__``
    (a duplicate-argument SyntaxError).  Left byte-identical; comments only.
    """

    UpperCamelCase__ : List[Any] =DiTPipeline
    UpperCamelCase__ : Dict =CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    # Optional pipeline params DiT does not support.
    UpperCamelCase__ : Any =PipelineTesterMixin.required_optional_params - {
        """latents""",
        """num_images_per_prompt""",
        """callback""",
        """callback_steps""",
    }
    UpperCamelCase__ : Any =CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    UpperCamelCase__ : Optional[Any] =False

    def __lowercase ( self ):
        """Build tiny transformer/vae/scheduler components for fast testing."""
        torch.manual_seed(0 )
        __UpperCamelCase : int =TransformeraDModel(
            sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=lowerCamelCase__ , activation_fn='gelu-approximate' , num_embeds_ada_norm=1000 , norm_type='ada_norm_zero' , norm_elementwise_affine=lowerCamelCase__ , )
        __UpperCamelCase : Union[str, Any] =AutoencoderKL()
        __UpperCamelCase : int =DDIMScheduler()
        __UpperCamelCase : Tuple ={'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
        return components

    def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__=0 ):
        """Build deterministic call kwargs (seeded generator) for the pipeline."""
        if str(lowerCamelCase__ ).startswith('mps' ):
            # MPS does not accept a device-bound generator; seed globally instead.
            __UpperCamelCase : Optional[Any] =torch.manual_seed(lowerCamelCase__ )
        else:
            __UpperCamelCase : Optional[Any] =torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
        __UpperCamelCase : List[Any] ={
            'class_labels': [1],
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs

    def __lowercase ( self ):
        """Run two inference steps on CPU and compare an image slice to a reference."""
        __UpperCamelCase : List[Any] ='cpu'

        __UpperCamelCase : Optional[int] =self.get_dummy_components()
        __UpperCamelCase : Optional[Any] =self.pipeline_class(**lowerCamelCase__ )
        pipe.to(lowerCamelCase__ )
        pipe.set_progress_bar_config(disable=lowerCamelCase__ )

        __UpperCamelCase : str =self.get_dummy_inputs(lowerCamelCase__ )
        __UpperCamelCase : str =pipe(**lowerCamelCase__ ).images
        __UpperCamelCase : Optional[Any] =image[0, -3:, -3:, -1]

        self.assertEqual(image.shape , (1, 16, 16, 3) )
        __UpperCamelCase : Tuple =np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )

        __UpperCamelCase : List[Any] =np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(lowerCamelCase__ , 1E-3 )

    def __lowercase ( self ):
        """Check batched single-image inference matches unbatched output."""
        self._test_inference_batch_single_identical(relax_max_difference=lowerCamelCase__ , expected_max_diff=1E-3 )

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def __lowercase ( self ):
        """Check the xformers attention path produces matching outputs."""
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class __A ( unittest.TestCase ):
    """Slow GPU integration tests for the pretrained DiT-XL-2 pipelines.

    NOTE(review): mangled like the class above — locals are bound to
    ``__UpperCamelCase`` while later lines read ``pipe`` / ``words`` /
    ``images`` etc., so this cannot run as written.  Left byte-identical.
    """

    def __lowercase ( self ):
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __lowercase ( self ):
        """Generate class-conditional 256px images and compare to stored arrays."""
        __UpperCamelCase : int =torch.manual_seed(0 )

        __UpperCamelCase : Union[str, Any] =DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
        pipe.to('cuda' )

        __UpperCamelCase : List[Any] =['vase', 'umbrella', 'white shark', 'white wolf']
        __UpperCamelCase : Dict =pipe.get_label_ids(lowerCamelCase__ )

        __UpperCamelCase : str =pipe(lowerCamelCase__ , generator=lowerCamelCase__ , num_inference_steps=40 , output_type='np' ).images

        for word, image in zip(lowerCamelCase__ , lowerCamelCase__ ):
            __UpperCamelCase : Union[str, Any] =load_numpy(
                f'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' )
            assert np.abs((expected_image - image).max() ) < 1E-2

    def __lowercase ( self ):
        """Generate 512px images with the DPM-Solver scheduler and compare."""
        __UpperCamelCase : List[Any] =DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
        __UpperCamelCase : Dict =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.to('cuda' )

        __UpperCamelCase : Tuple =['vase', 'umbrella']
        __UpperCamelCase : List[Any] =pipe.get_label_ids(lowerCamelCase__ )

        __UpperCamelCase : str =torch.manual_seed(0 )
        __UpperCamelCase : int =pipe(lowerCamelCase__ , generator=lowerCamelCase__ , num_inference_steps=25 , output_type='np' ).images

        for word, image in zip(lowerCamelCase__ , lowerCamelCase__ ):
            __UpperCamelCase : Dict =load_numpy(
                'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
                f'/dit/{word}_512.npy' )

            assert np.abs((expected_image - image).max() ) < 1E-1
| 71 |
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Tokenizer tests for Funnel (slow + fast) on a tiny WordPiece vocab.

    NOTE(review): mangled — locals are bound to ``__lowerCAmelCase`` while
    later lines read ``vocab_tokens`` / ``self.vocab_file`` / ``inputs`` etc.,
    so several methods cannot run as written.  Left byte-identical.
    """

    SCREAMING_SNAKE_CASE = FunnelTokenizer
    SCREAMING_SNAKE_CASE = FunnelTokenizerFast
    SCREAMING_SNAKE_CASE = True
    SCREAMING_SNAKE_CASE = True

    def _SCREAMING_SNAKE_CASE ( self: Tuple) -> Optional[int]:
        """Write a tiny WordPiece vocabulary file into the temp test dir."""
        super().setUp()

        __lowerCAmelCase : str = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        __lowerCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , **_SCREAMING_SNAKE_CASE: Union[str, Any]) -> int:
        """Instantiate the slow tokenizer from the temp vocab."""
        return FunnelTokenizer.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE)

    def _SCREAMING_SNAKE_CASE ( self: Any , **_SCREAMING_SNAKE_CASE: Any) -> str:
        """Instantiate the fast tokenizer from the temp vocab."""
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE)

    def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: str) -> Any:
        """Return an (input, expected decoded output) pair for round-trip tests."""
        __lowerCAmelCase : Union[str, Any] = "UNwant\u00E9d,running"
        __lowerCAmelCase : str = "unwanted, running"
        return input_text, output_text

    def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> List[str]:
        """Check WordPiece tokenization and id conversion on a sample string."""
        __lowerCAmelCase : Any = self.tokenizer_class(self.vocab_file)
        __lowerCAmelCase : Any = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(_SCREAMING_SNAKE_CASE , ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE) , [7, 4, 5, 10, 8, 9])

    def _SCREAMING_SNAKE_CASE ( self: List[str]) -> List[str]:
        """Check Funnel's token_type_ids: 2 for the leading cls, then 0/1 segments."""
        __lowerCAmelCase : Optional[Any] = self.get_tokenizers(do_lower_case=_SCREAMING_SNAKE_CASE)
        for tokenizer in tokenizers:
            __lowerCAmelCase : List[str] = tokenizer("UNwant\u00E9d,running")
            __lowerCAmelCase : Optional[int] = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len)

            __lowerCAmelCase : List[str] = tokenizer("UNwant\u00E9d,running" , "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len + [1] * sentence_len)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase__ = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''CLIPTokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''CLIPFeatureExtractor''']
lowerCAmelCase__ = ['''CLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 72 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def _lowercase ( __snake_case = "laptop" ) -> DataFrame:
    """Scrape Amazon.in search results for a product into a pandas DataFrame.

    NOTE(review): mangled and not runnable as written — locals are bound to
    ``__lowerCAmelCase`` while later lines read ``product`` / ``soup`` /
    ``data_frame`` etc., and ``item.ha`` looks like a mangled ``item.h2``.
    Left byte-identical; comments only.
    """
    __lowerCAmelCase : str = F"""https://www.amazon.in/laptop/s?k={product}"""
    # Browser-like headers so the request is not rejected as a bot.
    __lowerCAmelCase : Union[str, Any] = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    __lowerCAmelCase : List[str] = BeautifulSoup(requests.get(__snake_case ,headers=__snake_case ).text )
    # Initialize a Pandas dataframe with the column titles
    __lowerCAmelCase : Dict = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ] )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div" ,attrs={"class": "s-result-item", "data-component-type": "s-search-result"} ,) ,soup.find_all("div" ,attrs={"class": "a-row a-size-base a-color-base"} ) ,):
        try:
            __lowerCAmelCase : Any = item.ha.text
            __lowerCAmelCase : Union[str, Any] = "https://www.amazon.in/" + item.ha.a["href"]
            __lowerCAmelCase : Any = item.find("span" ,attrs={"class": "a-offscreen"} ).text
            try:
                __lowerCAmelCase : Union[str, Any] = item.find("span" ,attrs={"class": "a-icon-alt"} ).text
            except AttributeError:
                # Listing has no rating element.
                __lowerCAmelCase : Optional[Any] = "Not available"
            try:
                # MRP (list price) is rendered as a struck-through ₹ amount.
                __lowerCAmelCase : Union[str, Any] = (
                    "₹"
                    + item.find(
                        "span" ,attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
                )
            except AttributeError:
                __lowerCAmelCase : Dict = ""
            try:
                # Discount percentage = (MRP - price) / MRP * 100.
                __lowerCAmelCase : str = float(
                    (
                        (
                            float(product_mrp.strip("₹" ).replace("," ,"" ) )
                            - float(product_price.strip("₹" ).replace("," ,"" ) )
                        )
                        / float(product_mrp.strip("₹" ).replace("," ,"" ) )
                    )
                    * 100 )
            except ValueError:
                __lowerCAmelCase : List[str] = float("nan" )
        except AttributeError:
            pass
        __lowerCAmelCase : int = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        __lowerCAmelCase : Union[str, Any] = " "
        __lowerCAmelCase : Union[str, Any] = " "
        data_frame.index += 1
    return data_frame
if __name__ == "__main__":
__snake_case : Any = 'headphones'
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""") | 269 | 0 |
def SCREAMING_SNAKE_CASE__ ( upper_limit: int ) -> "list[int]":
    """Return the Catalan numbers C(0)..C(upper_limit) via dynamic programming.

    Args:
        upper_limit: largest index to compute; must be >= 0.

    Returns:
        List of length ``upper_limit + 1`` where index i holds C(i).

    Raises:
        ValueError: if ``upper_limit`` is negative.

    NOTE(review): the original's parameter was mangled (body read the unbound
    name ``upper_limit``) and the inner loop iterated ``range(<param>)``
    instead of ``range(i)``; both are restored per the recurrence comment.
    """
    if upper_limit < 0:
        raise ValueError('Limit for the Catalan sequence must be ≥ 0' )

    # dp table: catalan_list[i] holds C(i)
    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2 , upper_limit + 1 ):
        for j in range(i ):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
a =int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(F"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
| 73 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
    [
        # One single-GPU training job per framework, with expected result bounds.
        {
            'framework': 'pytorch',
            'script': 'run_glue.py',
            'model_name_or_path': 'distilbert-base-cased',
            'instance_type': 'ml.g4dn.xlarge',
            'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.6, 'eval_loss': 0.9},
        },
        {
            'framework': 'tensorflow',
            'script': 'run_tf.py',
            'model_name_or_path': 'distilbert-base-cased',
            'instance_type': 'ml.g4dn.xlarge',
            'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.3, 'eval_loss': 0.9},
        },
    ] )
class A__ ( unittest.TestCase ):
    """End-to-end single-node SageMaker training test parameterized by framework.

    Launches a HuggingFace estimator, then checks runtime/accuracy/loss bounds
    and dumps the metrics to a JSON file for the release PR.
    """

    def _SCREAMING_SNAKE_CASE ( self: int) -> Tuple:
        """Copy the framework's example script into the test path before running."""
        if self.framework == "pytorch":
            subprocess.run(
                F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="utf-8" , check=_SCREAMING_SNAKE_CASE , )
        assert hasattr(self , "env")

    def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Any=1) -> Dict:
        """Build the HuggingFace SageMaker estimator for this parameterization."""
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-single""" , instance_count=_SCREAMING_SNAKE_CASE , instance_type=self.instance_type , debugger_hook_config=_SCREAMING_SNAKE_CASE , hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="py36" , )

    def _SCREAMING_SNAKE_CASE ( self: str , _SCREAMING_SNAKE_CASE: List[Any]) -> Optional[Any]:
        """Export a finished job's metric dataframe to CSV in the test path."""
        TrainingJobAnalytics(_SCREAMING_SNAKE_CASE).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""")

    def _SCREAMING_SNAKE_CASE ( self: str) -> Optional[Any]:
        """Run training, assert the KPI bounds, and dump results to JSON."""
        __lowerCAmelCase : Tuple = self.create_estimator()

        # run training
        estimator.fit()

        # result dataframe
        __lowerCAmelCase : Tuple = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        __lowerCAmelCase : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        __lowerCAmelCase : Union[str, Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        __lowerCAmelCase : Tuple = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds" , 99_9999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(F"""{estimator.latest_training_job.name}.json""" , "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , _SCREAMING_SNAKE_CASE)
"""simple docstring"""
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def _snake_case ( model_card_dir , src_lang , tgt_lang ):
    """Write a README.md model card for one FSMT wmt19 translation pair.

    Args:
        model_card_dir: directory to create and write ``README.md`` into.
        src_lang: source language code (en/ru/de).
        tgt_lang: target language code (en/ru/de).

    NOTE(review): the original signature repeated the name ``snake_case__``
    three times (a duplicate-argument SyntaxError) while the body read
    ``model_card_dir`` / ``src_lang`` / ``tgt_lang`` — the same keyword names
    the driver below uses — so those parameter names are restored.
    """
    # Sample sentences used in the usage snippet of the card.
    texts = {
        'en': 'Machine learning is great, isn\'t it?',
        'ru': 'Машинное обучение - это здорово, не так ли?',
        'de': 'Maschinelles Lernen ist großartig, oder?',
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'],
        'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'],
        'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'],
        'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'],
    }
    pair = F'{src_lang}-{tgt_lang}'

    readme = F'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. \nFor more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'

    os.makedirs(model_card_dir , exist_ok=True )
    path = os.path.join(model_card_dir , 'README.md' )
    print(F'Generating {path}' )
    with open(path , 'w' , encoding='utf-8' ) as f:
        f.write(readme )
_lowercase = Path(__file__).resolve().parent.parent.parent
_lowercase = repo_dir / '''model_cards'''
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
_lowercase , _lowercase , _lowercase = model_name.split('''-''')
_lowercase = model_cards_dir / '''facebook''' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang) | 74 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
__snake_case : Optional[int] = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
__snake_case : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 269 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : List[str] = logging.get_logger(__name__)
a_ : Dict = {
"""microsoft/unispeech-large-1500h-cv""": (
"""https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"""
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class __UpperCamelCase ( lowerCamelCase__ ):
    # UniSpeech-style model configuration (mirrors HF `PretrainedConfig`).
    #
    # NOTE(review): this class is corrupted by identifier obfuscation:
    # - the `__init__` signature repeats the placeholder name ``lowerCAmelCase``
    #   for every argument (duplicate argument names are a SyntaxError);
    # - the body assigns to the throw-away local ``lowerCamelCase_`` instead of
    #   ``self.<attr>``, so attributes later read (``self.conv_dim``,
    #   ``self.conv_stride``, ``self.conv_kernel``, ``self.conv_stride`` in the
    #   property below) are never set.
    # The RHS of each assignment preserves the intended attribute name; restore
    # ``self.<rhs_name> = <param>`` before using this class.
    lowercase : str ='unispeech'  # `model_type` identifier used by AutoConfig
    def __init__( self, lowerCAmelCase=32, lowerCAmelCase=768, lowerCAmelCase=12, lowerCAmelCase=12, lowerCAmelCase=3_072, lowerCAmelCase="gelu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=0.0, lowerCAmelCase=0.0, lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=0.0_2, lowerCAmelCase=1e-5, lowerCAmelCase="group", lowerCAmelCase="gelu", lowerCAmelCase=(512, 512, 512, 512, 512, 512, 512), lowerCAmelCase=(5, 2, 2, 2, 2, 2, 2), lowerCAmelCase=(10, 3, 3, 3, 3, 2, 2), lowerCAmelCase=False, lowerCAmelCase=128, lowerCAmelCase=16, lowerCAmelCase=False, lowerCAmelCase=True, lowerCAmelCase=0.0_5, lowerCAmelCase=10, lowerCAmelCase=2, lowerCAmelCase=0.0, lowerCAmelCase=10, lowerCAmelCase=0, lowerCAmelCase=320, lowerCAmelCase=2, lowerCAmelCase=0.1, lowerCAmelCase=100, lowerCAmelCase=256, lowerCAmelCase=256, lowerCAmelCase=0.1, lowerCAmelCase="mean", lowerCAmelCase=False, lowerCAmelCase=False, lowerCAmelCase=256, lowerCAmelCase=80, lowerCAmelCase=0, lowerCAmelCase=1, lowerCAmelCase=2, lowerCAmelCase=0.5, **lowerCAmelCase, ):
        """Store the configuration; see the class-level note on corrupted identifiers."""
        super().__init__(**lowerCAmelCase, pad_token_id=lowerCAmelCase, bos_token_id=lowerCAmelCase, eos_token_id=lowerCAmelCase )
        # Transformer encoder dimensions and feature-extractor settings.
        lowerCamelCase_ =hidden_size
        lowerCamelCase_ =feat_extract_norm
        lowerCamelCase_ =feat_extract_activation
        # Conv feature-extractor geometry (dims / strides / kernels per layer).
        lowerCamelCase_ =list(lowerCAmelCase )
        lowerCamelCase_ =list(lowerCAmelCase )
        lowerCamelCase_ =list(lowerCAmelCase )
        lowerCamelCase_ =conv_bias
        lowerCamelCase_ =num_conv_pos_embeddings
        lowerCamelCase_ =num_conv_pos_embedding_groups
        lowerCamelCase_ =len(self.conv_dim )
        lowerCamelCase_ =num_hidden_layers
        lowerCamelCase_ =intermediate_size
        lowerCamelCase_ =hidden_act
        lowerCamelCase_ =num_attention_heads
        # Dropout / regularization settings.
        lowerCamelCase_ =hidden_dropout
        lowerCamelCase_ =attention_dropout
        lowerCamelCase_ =activation_dropout
        lowerCamelCase_ =feat_proj_dropout
        lowerCamelCase_ =final_dropout
        lowerCamelCase_ =layerdrop
        lowerCamelCase_ =layer_norm_eps
        lowerCamelCase_ =initializer_range
        lowerCamelCase_ =num_ctc_classes
        lowerCamelCase_ =vocab_size
        lowerCamelCase_ =do_stable_layer_norm
        lowerCamelCase_ =use_weighted_layer_sum
        lowerCamelCase_ =classifier_proj_size
        # The three conv lists must describe the same number of layers.
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
                ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
                f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
                f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        lowerCamelCase_ =apply_spec_augment
        lowerCamelCase_ =mask_time_prob
        lowerCamelCase_ =mask_time_length
        lowerCamelCase_ =mask_time_min_masks
        lowerCamelCase_ =mask_feature_prob
        lowerCamelCase_ =mask_feature_length
        lowerCamelCase_ =mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        lowerCamelCase_ =num_codevectors_per_group
        lowerCamelCase_ =num_codevector_groups
        lowerCamelCase_ =contrastive_logits_temperature
        lowerCamelCase_ =feat_quantizer_dropout
        lowerCamelCase_ =num_negatives
        lowerCamelCase_ =codevector_dim
        lowerCamelCase_ =proj_codevector_dim
        lowerCamelCase_ =diversity_loss_weight
        # ctc loss
        lowerCamelCase_ =ctc_loss_reduction
        lowerCamelCase_ =ctc_zero_infinity
        # pretraining loss
        lowerCamelCase_ =replace_prob
    @property
    def lowercase__ ( self ):
        """Total downsampling factor of the conv feature extractor (product of all strides)."""
        return functools.reduce(operator.mul, self.conv_stride, 1 )
| 75 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__snake_case : Optional[int] = logging.get_logger(__name__)
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
    # MaskFormer-Swin backbone configuration.
    #
    # NOTE(review): corrupted by identifier obfuscation — the `__init__`
    # signature repeats ``_SCREAMING_SNAKE_CASE`` for every parameter
    # (duplicate argument names are a SyntaxError) and the body assigns to the
    # local ``__lowerCAmelCase`` instead of ``self.<attr>``; the RHS names
    # preserve the intended attributes and must be restored before use.
    SCREAMING_SNAKE_CASE = 'maskformer-swin'  # `model_type` identifier
    # Maps standard config attribute names onto Swin's internal names.
    SCREAMING_SNAKE_CASE = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }
    def __init__( self: Optional[int] , _SCREAMING_SNAKE_CASE: int=224 , _SCREAMING_SNAKE_CASE: Tuple=4 , _SCREAMING_SNAKE_CASE: int=3 , _SCREAMING_SNAKE_CASE: List[Any]=96 , _SCREAMING_SNAKE_CASE: Union[str, Any]=[2, 2, 6, 2] , _SCREAMING_SNAKE_CASE: Any=[3, 6, 12, 24] , _SCREAMING_SNAKE_CASE: List[str]=7 , _SCREAMING_SNAKE_CASE: List[str]=4.0 , _SCREAMING_SNAKE_CASE: Optional[int]=True , _SCREAMING_SNAKE_CASE: Tuple=0.0 , _SCREAMING_SNAKE_CASE: Any=0.0 , _SCREAMING_SNAKE_CASE: Any=0.1 , _SCREAMING_SNAKE_CASE: str="gelu" , _SCREAMING_SNAKE_CASE: Tuple=False , _SCREAMING_SNAKE_CASE: Union[str, Any]=0.02 , _SCREAMING_SNAKE_CASE: str=1e-5 , _SCREAMING_SNAKE_CASE: Optional[int]=None , _SCREAMING_SNAKE_CASE: str=None , **_SCREAMING_SNAKE_CASE: Union[str, Any] , ) -> List[str]:
        """Store the backbone configuration; see the class-level corruption note."""
        super().__init__(**_SCREAMING_SNAKE_CASE)
        __lowerCAmelCase : int = image_size
        __lowerCAmelCase : Any = patch_size
        __lowerCAmelCase : Tuple = num_channels
        __lowerCAmelCase : Any = embed_dim
        __lowerCAmelCase : Any = depths
        __lowerCAmelCase : Dict = len(_SCREAMING_SNAKE_CASE)
        __lowerCAmelCase : List[Any] = num_heads
        __lowerCAmelCase : Tuple = window_size
        __lowerCAmelCase : Dict = mlp_ratio
        __lowerCAmelCase : Any = qkv_bias
        __lowerCAmelCase : Union[str, Any] = hidden_dropout_prob
        __lowerCAmelCase : int = attention_probs_dropout_prob
        __lowerCAmelCase : Tuple = drop_path_rate
        __lowerCAmelCase : int = hidden_act
        __lowerCAmelCase : Optional[int] = use_absolute_embeddings
        __lowerCAmelCase : List[str] = layer_norm_eps
        __lowerCAmelCase : Any = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        __lowerCAmelCase : Optional[Any] = int(embed_dim * 2 ** (len(_SCREAMING_SNAKE_CASE) - 1))
        # Stage names: "stem" plus one entry per depth, used for backbone feature selection.
        __lowerCAmelCase : Any = ["stem"] + [F"""stage{idx}""" for idx in range(1 , len(_SCREAMING_SNAKE_CASE) + 1)]
        __lowerCAmelCase , __lowerCAmelCase : List[str] = get_aligned_output_features_output_indices(
            out_features=_SCREAMING_SNAKE_CASE , out_indices=_SCREAMING_SNAKE_CASE , stage_names=self.stage_names)
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
a_ = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    """Export ``model`` to an ONNX file at ``output_path`` via ``torch.onnx.export``.

    Parameter names are reconstructed from the keyword call sites below
    (``model_args=``, ``output_path=``, ``ordered_input_names=``, ...); the
    original definition repeated the placeholder ``_a`` for every argument.
    """
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def lowerCamelCase__ ( _a , _a , _a , _a = False):
    """Export a Stable Diffusion pipeline (text encoder, UNet, VAE encoder/decoder
    and optional safety checker) to ONNX, then reassemble and sanity-load it as
    an `OnnxStableDiffusionPipeline`.

    NOTE(review): corrupted by identifier obfuscation — the signature repeats
    the placeholder ``_a`` (duplicate argument names, a SyntaxError), locals
    are bound to the throw-away ``SCREAMING_SNAKE_CASE`` while the code reads
    real names (``fpaa``, ``pipeline``, ``output_path``, ``unet_path``, ...),
    and ``torch.floataa`` appears in both dtype branches (originally
    float16 / float32). Restore the names before running.
    """
    SCREAMING_SNAKE_CASE : str = torch.floataa if fpaa else torch.floataa
    if fpaa and torch.cuda.is_available():
        SCREAMING_SNAKE_CASE : Dict = "cuda"
    elif fpaa and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        SCREAMING_SNAKE_CASE : Tuple = "cpu"
    SCREAMING_SNAKE_CASE : Any = StableDiffusionPipeline.from_pretrained(_a , torch_dtype=_a).to(_a)
    SCREAMING_SNAKE_CASE : List[Any] = Path(_a)
    # TEXT ENCODER
    SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline.text_encoder.config.max_position_embeddings
    SCREAMING_SNAKE_CASE : Tuple = pipeline.text_encoder.config.hidden_size
    SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline.tokenizer(
        "A sample prompt" , padding="max_length" , max_length=pipeline.tokenizer.model_max_length , truncation=_a , return_tensors="pt" , )
    onnx_export(
        pipeline.text_encoder , model_args=(text_input.input_ids.to(device=_a , dtype=torch.intaa)) , output_path=output_path / "text_encoder" / "model.onnx" , ordered_input_names=["input_ids"] , output_names=["last_hidden_state", "pooler_output"] , dynamic_axes={
            "input_ids": {0: "batch", 1: "sequence"},
        } , opset=_a , )
    del pipeline.text_encoder
    # UNET
    SCREAMING_SNAKE_CASE : Dict = pipeline.unet.config.in_channels
    SCREAMING_SNAKE_CASE : str = pipeline.unet.config.sample_size
    SCREAMING_SNAKE_CASE : List[str] = output_path / "unet" / "model.onnx"
    onnx_export(
        pipeline.unet , model_args=(
            torch.randn(2 , _a , _a , _a).to(device=_a , dtype=_a),
            torch.randn(2).to(device=_a , dtype=_a),
            torch.randn(2 , _a , _a).to(device=_a , dtype=_a),
            False,
        ) , output_path=_a , ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"] , output_names=["out_sample"] , dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
            "timestep": {0: "batch"},
            "encoder_hidden_states": {0: "batch", 1: "sequence"},
        } , opset=_a , use_external_data_format=_a , )
    SCREAMING_SNAKE_CASE : List[Any] = str(unet_path.absolute().as_posix())
    SCREAMING_SNAKE_CASE : Tuple = os.path.dirname(_a)
    SCREAMING_SNAKE_CASE : List[Any] = onnx.load(_a)
    # clean up existing tensor files
    shutil.rmtree(_a)
    os.mkdir(_a)
    # collate external tensor files into one
    onnx.save_model(
        _a , _a , save_as_external_data=_a , all_tensors_to_one_file=_a , location="weights.pb" , convert_attribute=_a , )
    del pipeline.unet
    # VAE ENCODER
    SCREAMING_SNAKE_CASE : Any = pipeline.vae
    SCREAMING_SNAKE_CASE : Tuple = vae_encoder.config.in_channels
    SCREAMING_SNAKE_CASE : Optional[Any] = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    SCREAMING_SNAKE_CASE : str = lambda _a , _a: vae_encoder.encode(_a , _a)[0].sample()
    onnx_export(
        _a , model_args=(
            torch.randn(1 , _a , _a , _a).to(device=_a , dtype=_a),
            False,
        ) , output_path=output_path / "vae_encoder" / "model.onnx" , ordered_input_names=["sample", "return_dict"] , output_names=["latent_sample"] , dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        } , opset=_a , )
    # VAE DECODER
    SCREAMING_SNAKE_CASE : str = pipeline.vae
    SCREAMING_SNAKE_CASE : Tuple = vae_decoder.config.latent_channels
    SCREAMING_SNAKE_CASE : str = vae_decoder.config.out_channels
    # forward only through the decoder part
    # NOTE(review): upstream this is `vae_decoder.forward = vae_encoder.decode`;
    # the assignment target name was lost here.
    SCREAMING_SNAKE_CASE : List[Any] = vae_encoder.decode
    onnx_export(
        _a , model_args=(
            torch.randn(1 , _a , _a , _a).to(device=_a , dtype=_a),
            False,
        ) , output_path=output_path / "vae_decoder" / "model.onnx" , ordered_input_names=["latent_sample", "return_dict"] , output_names=["sample"] , dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        } , opset=_a , )
    del pipeline.vae
    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        SCREAMING_SNAKE_CASE : int = pipeline.safety_checker
        SCREAMING_SNAKE_CASE : List[str] = safety_checker.config.vision_config.num_channels
        SCREAMING_SNAKE_CASE : List[Any] = safety_checker.config.vision_config.image_size
        SCREAMING_SNAKE_CASE : List[str] = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker , model_args=(
                torch.randn(
                    1 , _a , _a , _a , ).to(device=_a , dtype=_a),
                torch.randn(1 , _a , _a , _a).to(device=_a , dtype=_a),
            ) , output_path=output_path / "safety_checker" / "model.onnx" , ordered_input_names=["clip_input", "images"] , output_names=["out_images", "has_nsfw_concepts"] , dynamic_axes={
                "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
                "images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
            } , opset=_a , )
        del pipeline.safety_checker
        SCREAMING_SNAKE_CASE : Optional[int] = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker")
        SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline.feature_extractor
    else:
        SCREAMING_SNAKE_CASE : Tuple = None
        SCREAMING_SNAKE_CASE : Optional[int] = None
    # Reassemble all exported parts into an ONNX-runtime pipeline and verify it loads.
    SCREAMING_SNAKE_CASE : Any = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder") , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder") , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder") , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / "unet") , scheduler=pipeline.scheduler , safety_checker=_a , feature_extractor=_a , requires_safety_checker=safety_checker is not None , )
    onnx_pipeline.save_pretrained(_a)
    print("ONNX pipeline saved to" , _a)
    del pipeline
    del onnx_pipeline
    SCREAMING_SNAKE_CASE : Any = OnnxStableDiffusionPipeline.from_pretrained(_a , provider="CPUExecutionProvider")
    print("ONNX pipeline is loadable")
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=14,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
a_ = parser.parse_args()
convert_models(args.model_path, args.output_path, args.opset, args.fpaa) | 76 |
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    """Track the peak resident-set size (RSS) of the current process.

    Between ``start()`` and ``stop()`` a background thread busy-polls
    ``psutil`` so the peak is caught; ``stop()`` returns the peak in bytes.
    (Renamed from the placeholder ``A__`` so the module-level
    ``PeakCPUMemory()`` instantiation resolves; method names are those the
    code itself references: ``peak_monitor``, ``start``, ``stop``.)
    """

    def __init__(self) -> None:
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self) -> None:
        """Busy-poll RSS until ``peak_monitoring`` is cleared by ``stop()``."""
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self) -> None:
        """Launch the monitoring thread (daemonized so it never blocks interpreter exit)."""
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self) -> int:
        """Stop monitoring, join the thread and return the observed peak RSS in bytes."""
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak
__snake_case : Tuple = PeakCPUMemory()
def _lowercase ( ) -> dict:
    """Snapshot wall time, CPU RSS and per-GPU allocated memory, and start peak tracking.

    Returns a dict with keys ``"time"`` (seconds), ``"cpu"`` (bytes) and one
    entry per CUDA device keyed by ``str(device_index)`` (bytes) — the same
    keys the end-measure helper reads back.

    NOTE(review): three functions in this file share the placeholder name
    ``_lowercase`` (start/end/log of a measurement); they need distinct names.
    """
    # Time
    measures = {"time": time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem (bytes) at the start of the measured region
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem: currently-allocated bytes per device
    for i in range(torch.cuda.device_count() ):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()
    return measures
def _lowercase ( start_measures ) -> dict:
    """Return deltas since ``start_measures``: elapsed seconds plus memory deltas in MiB.

    Produces keys ``"time"``, ``"cpu"``, ``"cpu-peak"``, ``str(i)`` and
    ``f"{i}-peak"`` per CUDA device ``i`` — exactly the keys the logging
    helper below consumes.

    NOTE(review): three functions in this file share the placeholder name
    ``_lowercase``; they need distinct names.
    """
    # Time
    measures = {"time": time.time() - start_measures["time"]}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem deltas, converted from bytes to MiB
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
    # GPU mem deltas (current and peak), in MiB
    for i in range(torch.cuda.device_count() ):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20
    return measures
def _lowercase ( __snake_case ,__snake_case ) -> Dict:
print(F"""{description}:""" )
print(F"""- Time: {measures['time']:.2f}s""" )
for i in range(torch.cuda.device_count() ):
print(F"""- GPU {i} allocated: {measures[str(__snake_case )]:.2f}MiB""" )
__lowerCAmelCase : Optional[Any] = measures[F"""{i}-peak"""]
print(F"""- GPU {i} peak: {peak:.2f}MiB""" )
print(F"""- CPU RAM allocated: {measures['cpu']:.2f}MiB""" )
print(F"""- CPU RAM peak: {measures['cpu-peak']:.2f}MiB""" ) | 269 | 0 |
"""simple docstring"""
def nand_gate(input_1: int, input_2: int) -> int:
    """Return 1 when NAND of the two binary inputs is true, else 0.

    NAND is false only when both inputs are 1, i.e. true whenever at least
    one input is 0 — hence the ``count(0)`` check.
    (Renamed from the placeholder ``a_`` with duplicate ``input_a`` params so
    the module-level ``nand_gate(...)`` calls below resolve.)
    """
    return int((input_1, input_2).count(0) != 0)
def test_nand_gate() -> None:
    """Exhaustively check the NAND truth table.

    (Renamed from the second placeholder ``a_`` definition, which shadowed
    the gate function itself.)
    """
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 77 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__snake_case : List[Any] = logging.get_logger(__name__)
__snake_case : Any = {
'speechbrain/m-ctc-t-large': 'https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json',
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class A__ ( __SCREAMING_SNAKE_CASE ):
    # M-CTC-T speech-recognition model configuration.
    #
    # NOTE(review): corrupted by identifier obfuscation — the `__init__`
    # signature repeats ``_SCREAMING_SNAKE_CASE`` for every parameter
    # (duplicate argument names are a SyntaxError) and the body assigns to the
    # local ``__lowerCAmelCase`` instead of ``self.<attr>``, so attributes read
    # later (``self.conv_kernel``, ``self.num_conv_layers``) are never set.
    # The RHS names preserve the intended attributes; restore them before use.
    SCREAMING_SNAKE_CASE = 'mctct'  # `model_type` identifier
    def __init__( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: str=8065 , _SCREAMING_SNAKE_CASE: str=1536 , _SCREAMING_SNAKE_CASE: str=36 , _SCREAMING_SNAKE_CASE: Optional[Any]=6144 , _SCREAMING_SNAKE_CASE: Optional[Any]=4 , _SCREAMING_SNAKE_CASE: Union[str, Any]=384 , _SCREAMING_SNAKE_CASE: Optional[Any]=920 , _SCREAMING_SNAKE_CASE: Union[str, Any]=1e-5 , _SCREAMING_SNAKE_CASE: List[Any]=0.3 , _SCREAMING_SNAKE_CASE: Optional[Any]="relu" , _SCREAMING_SNAKE_CASE: Optional[int]=0.02 , _SCREAMING_SNAKE_CASE: Optional[Any]=0.3 , _SCREAMING_SNAKE_CASE: Dict=0.3 , _SCREAMING_SNAKE_CASE: List[Any]=1 , _SCREAMING_SNAKE_CASE: Optional[Any]=0 , _SCREAMING_SNAKE_CASE: List[str]=2 , _SCREAMING_SNAKE_CASE: Union[str, Any]=1 , _SCREAMING_SNAKE_CASE: Tuple=0.3 , _SCREAMING_SNAKE_CASE: Dict=1 , _SCREAMING_SNAKE_CASE: int=(7,) , _SCREAMING_SNAKE_CASE: str=(3,) , _SCREAMING_SNAKE_CASE: Union[str, Any]=80 , _SCREAMING_SNAKE_CASE: Tuple=1 , _SCREAMING_SNAKE_CASE: Dict=None , _SCREAMING_SNAKE_CASE: Tuple="sum" , _SCREAMING_SNAKE_CASE: List[str]=False , **_SCREAMING_SNAKE_CASE: Tuple , ) -> Tuple:
        """Store the configuration; see the class-level corruption note."""
        super().__init__(**_SCREAMING_SNAKE_CASE , pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE)
        # Transformer encoder dimensions.
        __lowerCAmelCase : str = vocab_size
        __lowerCAmelCase : str = hidden_size
        __lowerCAmelCase : str = num_hidden_layers
        __lowerCAmelCase : str = intermediate_size
        __lowerCAmelCase : List[Any] = num_attention_heads
        __lowerCAmelCase : Dict = attention_head_dim
        __lowerCAmelCase : Optional[int] = max_position_embeddings
        __lowerCAmelCase : str = layer_norm_eps
        __lowerCAmelCase : Tuple = layerdrop
        __lowerCAmelCase : str = hidden_act
        __lowerCAmelCase : List[Any] = initializer_range
        __lowerCAmelCase : int = hidden_dropout_prob
        __lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
        # Special token ids.
        __lowerCAmelCase : str = pad_token_id
        __lowerCAmelCase : Optional[int] = bos_token_id
        __lowerCAmelCase : Union[str, Any] = eos_token_id
        # Convolutional front-end settings.
        __lowerCAmelCase : Any = conv_glu_dim
        __lowerCAmelCase : Optional[int] = conv_dropout
        __lowerCAmelCase : Union[str, Any] = num_conv_layers
        __lowerCAmelCase : Optional[int] = input_feat_per_channel
        __lowerCAmelCase : Union[str, Any] = input_channels
        __lowerCAmelCase : Optional[Any] = conv_channels
        # CTC loss settings.
        __lowerCAmelCase : Dict = ctc_loss_reduction
        __lowerCAmelCase : int = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        __lowerCAmelCase : List[str] = list(_SCREAMING_SNAKE_CASE)
        __lowerCAmelCase : Dict = list(_SCREAMING_SNAKE_CASE)
        # One kernel size per conv layer is required.
        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                F"""but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, """
                F"""`config.num_conv_layers = {self.num_conv_layers}`.""")
"""simple docstring"""
def _lowerCAmelCase ( lowercase_ ):
assert column_title.isupper()
UpperCAmelCase = 0
UpperCAmelCase = len(lowercase_ ) - 1
UpperCAmelCase = 0
while index >= 0:
UpperCAmelCase = (ord(column_title[index] ) - 64) * pow(26 , lowercase_ )
answer += value
power += 1
index -= 1
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 78 |
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
__snake_case : Optional[Any] = [8, 5, 9, 7]
__snake_case : List[Any] = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
__snake_case : Optional[int] = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class A__ :
    """Banker's algorithm: decide whether processes can run to completion
    without deadlock, given a claim vector, current allocations and maximum
    claims.

    NOTE(review): corrupted by identifier obfuscation — `__init__` repeats
    ``_SCREAMING_SNAKE_CASE`` for all three parameters (duplicate argument
    names are a SyntaxError), every method is named ``_SCREAMING_SNAKE_CASE``
    (later defs shadow earlier ones), and assignments target the local
    ``__lowerCAmelCase`` instead of the private attributes
    (``self.__claim_vector`` etc.) that the methods read. Restore the names
    before use.
    """
    def __init__( self: Any , _SCREAMING_SNAKE_CASE: list[int] , _SCREAMING_SNAKE_CASE: list[list[int]] , _SCREAMING_SNAKE_CASE: list[list[int]] , ) -> None:
        """Store the claim vector, allocation table and maximum-claim table."""
        __lowerCAmelCase : Any = claim_vector
        __lowerCAmelCase : Tuple = allocated_resources_table
        __lowerCAmelCase : Tuple = maximum_claim_table
    def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> list[int]:
        # Sum each resource column over all processes (total currently allocated).
        """Return the total amount of each resource currently allocated."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]
    def _SCREAMING_SNAKE_CASE ( self: int) -> list[int]:
        """Return the currently available resources (claim minus allocated)."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation())
    def _SCREAMING_SNAKE_CASE ( self: int) -> list[list[int]]:
        """Return each process's remaining need (maximum claim minus allocation)."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(_SCREAMING_SNAKE_CASE))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]
    def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> dict[int, list[int]]:
        """Map each need row back to its original process index."""
        return {self.__need().index(_SCREAMING_SNAKE_CASE): i for i in self.__need()}
    def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , **_SCREAMING_SNAKE_CASE: List[Any]) -> None:
        """Run the safety check: repeatedly execute any process whose remaining
        need fits in the available resources, freeing its allocation; report
        an unsafe state if no process can proceed."""
        __lowerCAmelCase : Optional[int] = self.__need()
        __lowerCAmelCase : int = self.__allocated_resources_table
        __lowerCAmelCase : Dict = self.__available_resources()
        __lowerCAmelCase : str = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            __lowerCAmelCase : int = False
            for each_need in need_list:
                __lowerCAmelCase : Dict = True
                for index, need in enumerate(_SCREAMING_SNAKE_CASE):
                    # A process can only run if every resource need is satisfiable.
                    if need > available_resources[index]:
                        __lowerCAmelCase : Dict = False
                        break
                if execution:
                    __lowerCAmelCase : Any = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            __lowerCAmelCase : Union[str, Any] = original_need_index
                    print(F"""Process {process_number + 1} is executing.""")
                    # remove the process run from stack
                    need_list.remove(_SCREAMING_SNAKE_CASE)
                    # update available/freed resources stack
                    __lowerCAmelCase : Dict = np.array(_SCREAMING_SNAKE_CASE) + np.array(
                        alloc_resources_table[process_number])
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(_SCREAMING_SNAKE_CASE) for x in available_resources]))
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break
    def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> List[Any]:
        """Print the allocation table, maximum-claim table and resource usage."""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                F"""P{self.__allocated_resources_table.index(_SCREAMING_SNAKE_CASE) + 1}"""
                + " ".join(F"""{it:>8}""" for it in item)
                + "\n")
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                F"""P{self.__maximum_claim_table.index(_SCREAMING_SNAKE_CASE) + 1}"""
                + " ".join(F"""{it:>8}""" for it in item)
                + "\n")
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(_SCREAMING_SNAKE_CASE) for x in self.__claim_vector))
        print(
            "Initial Available Resources: "
            + " ".join(str(_SCREAMING_SNAKE_CASE) for x in self.__available_resources()))
        time.sleep(1)
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowerCamelCase_ = 3
def primitive_root(p_val: int) -> int:
    """Return a candidate primitive root modulo the prime ``p_val``.

    Random candidates g in [3, p_val) are rejected while g**2 = 1 (mod p)
    or g**p = 1 (mod p), matching the original screening logic.
    (Renamed from the placeholder ``__lowercase`` — the call site in the key
    generator invokes ``primitive_root``.)
    """
    print("Generating primitive root of p" )
    while True:
        g = random.randrange(3 , p_val )
        if pow(g , 2 , p_val ) == 1:
            continue
        if pow(g , p_val , p_val ) == 1:
            continue
        return g
def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    """Generate an ElGamal key pair of the given bit size.

    Returns ``((key_size, e_1, e_2, p), (key_size, d))`` where ``p`` is a
    large prime, ``e_1`` a primitive root mod p, ``d`` the private exponent
    and ``e_2`` the modular inverse of ``e_1**d mod p``.
    (Renamed from the placeholder ``__lowercase`` — ``make_key_files`` calls
    ``generate_key``; locals restored from the inline comments.)
    """
    print("Generating prime p..." )
    p = rabin_miller.generate_large_prime(key_size )  # select large prime number.
    e_1 = primitive_root(p )  # one primitive root on modulo p.
    d = random.randrange(3 , p )  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1 , d , p ) , p )
    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
def make_key_files(name: str, key_size: int) -> None:
    """Generate a key pair and write ``{name}_pubkey.txt`` / ``{name}_privkey.txt``.

    Refuses to overwrite existing key files and exits instead.
    (Renamed from the placeholder ``__lowercase`` with duplicate parameter
    names — ``main`` calls ``make_key_files``; parameter names restored from
    the body's reads of ``name`` and the ``generate_key`` argument.)
    """
    if os.path.exists(F'''{name}_pubkey.txt''' ) or os.path.exists(F'''{name}_privkey.txt''' ):
        print("\nWARNING:" )
        print(
            F'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'''
            "Use a different name or delete these files and re-run this program." )
        sys.exit()
    public_key , private_key = generate_key(key_size )
    print(F'''\nWriting public key to file {name}_pubkey.txt...''' )
    with open(F'''{name}_pubkey.txt''' , "w" ) as fo:
        fo.write(F'''{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}''' )
    print(F'''Writing private key to file {name}_privkey.txt...''' )
    with open(F'''{name}_privkey.txt''' , "w" ) as fo:
        fo.write(F'''{private_key[0]},{private_key[1]}''' )
def main() -> None:
    """Generate a 2048-bit ElGamal key pair and write it to ``elgamal_*.txt``.

    (Renamed from the placeholder ``__lowercase`` — the ``__main__`` guard
    below calls ``main()``.)
    """
    print("Making key files..." )
    make_key_files("elgamal" , 2048 )
    print("Key files generation successful" )
if __name__ == "__main__":
main()
| 79 |
"""simple docstring"""
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def _lowercase ( *__snake_case ) -> Optional[Any]:
with open(__snake_case ,"r" ) as fh:
fcntl.flock(__snake_case ,fcntl.LOCK_EX )
try:
print(*__snake_case )
finally:
fcntl.flock(__snake_case ,fcntl.LOCK_UN )
__snake_case : List[Any] = int(os.environ['LOCAL_RANK'])
torch.cuda.set_device(local_rank)
__snake_case : List[str] = torch.device('cuda', local_rank)
__snake_case : Optional[Any] = socket.gethostname()
__snake_case : str = F"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group('nccl')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
__snake_case : Tuple = dist.get_rank()
__snake_case : Any = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise | 269 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowercase_ ( a__ , a__ , a__ , unittest.TestCase ):
    """Fast CPU tests for ``StableDiffusionInpaintPipeline`` built from tiny random components.

    NOTE(review): this file was mechanically renamed — every class attribute below is bound
    to the same identifier and every method is called ``__a``, so later bindings overwrite
    earlier ones and body reads (``unet``, ``components`` ...) reference names that are never
    assigned. The original identifiers (pipeline_class, params, batch_params, image_params,
    get_dummy_components, get_dummy_inputs, ...) must be restored before this can run —
    confirm against the upstream diffusers test file.
    """

    # Pipeline under test plus the argument sets consumed by the common tester mixins.
    __UpperCAmelCase = StableDiffusionInpaintPipeline
    __UpperCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    __UpperCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    __UpperCAmelCase = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    __UpperCAmelCase = frozenset([] )

    def __a ( self ):
        # Build the dict of tiny, deterministically-seeded UNet / scheduler / VAE / CLIP
        # components used by every fast test (9 input channels: latents + mask + masked image).
        torch.manual_seed(0 )
        UpperCamelCase__ = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a , )
        UpperCamelCase__ = PNDMScheduler(skip_prk_steps=a )
        torch.manual_seed(0 )
        UpperCamelCase__ = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_28 , )
        torch.manual_seed(0 )
        UpperCamelCase__ = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , )
        UpperCamelCase__ = CLIPTextModel(a )
        UpperCamelCase__ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        UpperCamelCase__ = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def __a ( self , a , a=0 ):
        # Produce prompt/image/mask/generator kwargs for a 2-step run on the given device.
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        UpperCamelCase__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(a ) ).to(a )
        UpperCamelCase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCamelCase__ = Image.fromarray(np.uinta(a ) ).convert("RGB" ).resize((64, 64) )
        UpperCamelCase__ = Image.fromarray(np.uinta(image + 4 ) ).convert("RGB" ).resize((64, 64) )
        if str(a ).startswith("mps" ):
            UpperCamelCase__ = torch.manual_seed(a )
        else:
            UpperCamelCase__ = torch.Generator(device=a ).manual_seed(a )
        UpperCamelCase__ = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def __a ( self ):
        # Smoke-test a 2-step CPU run and pin a 3x3 corner slice of the output image.
        UpperCamelCase__ = "cpu"  # ensure determinism for the device-dependent torch.Generator
        UpperCamelCase__ = self.get_dummy_components()
        UpperCamelCase__ = StableDiffusionInpaintPipeline(**a )
        UpperCamelCase__ = sd_pipe.to(a )
        sd_pipe.set_progress_bar_config(disable=a )
        UpperCamelCase__ = self.get_dummy_inputs(a )
        UpperCamelCase__ = sd_pipe(**a ).images
        UpperCamelCase__ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        UpperCamelCase__ = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def __a ( self ):
        # Batched vs single inference must agree within 3e-3 (delegated to the mixin).
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowercase_ ( unittest.TestCase ):
    """Slow GPU integration tests for ``stabilityai/stable-diffusion-2-inpainting``.

    Covers fp32, fp16, and fp16 + attention-slicing + sequential CPU offload
    (with a < 2.65 GB VRAM budget).

    NOTE(review): assignment targets were mechanically renamed to ``UpperCamelCase__``,
    so reads such as ``pipe``, ``output.images`` and ``mem_bytes`` reference names that
    are never bound — restore the original identifiers before running.
    """

    def __a ( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __a ( self ):
        # Full-precision end-to-end run compared to a stored reference rendering.
        UpperCamelCase__ = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png" )
        UpperCamelCase__ = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
        UpperCamelCase__ = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy" )
        UpperCamelCase__ = "stabilityai/stable-diffusion-2-inpainting"
        UpperCamelCase__ = StableDiffusionInpaintPipeline.from_pretrained(a , safety_checker=a )
        pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        pipe.enable_attention_slicing()
        UpperCamelCase__ = "Face of a yellow cat, high resolution, sitting on a park bench"
        UpperCamelCase__ = torch.manual_seed(0 )
        UpperCamelCase__ = pipe(
            prompt=a , image=a , mask_image=a , generator=a , output_type="np" , )
        UpperCamelCase__ = output.images[0]
        assert image.shape == (5_12, 5_12, 3)
        assert np.abs(expected_image - image ).max() < 9e-3

    def __a ( self ):
        # Same scenario in fp16 against an fp16 reference (looser 5e-1 tolerance).
        UpperCamelCase__ = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png" )
        UpperCamelCase__ = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
        UpperCamelCase__ = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy" )
        UpperCamelCase__ = "stabilityai/stable-diffusion-2-inpainting"
        UpperCamelCase__ = StableDiffusionInpaintPipeline.from_pretrained(
            a , torch_dtype=torch.floataa , safety_checker=a , )
        pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        pipe.enable_attention_slicing()
        UpperCamelCase__ = "Face of a yellow cat, high resolution, sitting on a park bench"
        UpperCamelCase__ = torch.manual_seed(0 )
        UpperCamelCase__ = pipe(
            prompt=a , image=a , mask_image=a , generator=a , output_type="np" , )
        UpperCamelCase__ = output.images[0]
        assert image.shape == (5_12, 5_12, 3)
        assert np.abs(expected_image - image ).max() < 5e-1

    def __a ( self ):
        # Memory-budget test: fp16 + max attention slicing + sequential CPU offload
        # must keep peak VRAM under 2.65 GB.
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        UpperCamelCase__ = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png" )
        UpperCamelCase__ = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
        UpperCamelCase__ = "stabilityai/stable-diffusion-2-inpainting"
        UpperCamelCase__ = PNDMScheduler.from_pretrained(a , subfolder="scheduler" )
        UpperCamelCase__ = StableDiffusionInpaintPipeline.from_pretrained(
            a , safety_checker=a , scheduler=a , torch_dtype=torch.floataa , )
        pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        UpperCamelCase__ = "Face of a yellow cat, high resolution, sitting on a park bench"
        UpperCamelCase__ = torch.manual_seed(0 )
        UpperCamelCase__ = pipe(
            prompt=a , image=a , mask_image=a , generator=a , num_inference_steps=2 , output_type="np" , )
        UpperCamelCase__ = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 80 |
"""simple docstring"""
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
__snake_case : Optional[int] = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
__snake_case : str = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
__snake_case : str = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n'
def simple_accuracy(preds, labels):
    """Return the fraction of positions where *preds* equals *labels*, as a plain float.

    The original definition was corrupted (both parameters were named
    ``__snake_case`` — a SyntaxError — and the def name did not match the
    ``simple_accuracy`` call sites in this file). Inputs are coerced to numpy
    arrays so plain Python lists also work.

    Args:
        preds: array-like of predicted labels.
        labels: array-like of gold labels, same shape as *preds*.
    """
    preds = np.asarray(preds)
    labels = np.asarray(labels)
    return float((preds == labels).mean())
def acc_and_fa(preds, labels):
    """Return both simple accuracy and F1 for *preds* vs *labels*.

    Used by the ``wiki-ner`` subset (see ``_compute`` below). The original
    definition was corrupted: both parameters were named ``__snake_case``
    (a SyntaxError) and the def name did not match the ``acc_and_fa`` call
    site; names are reconstructed from that usage.

    Returns:
        dict with keys ``"accuracy"`` and ``"f1"``.
    """
    acc = simple_accuracy(preds, labels)
    # fa_score is sklearn.metrics.f1_score under the file's obfuscated import name.
    fa = float(fa_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": fa,
    }
def precision_at_aa(en_sentvecs, in_sentvecs):
    """Precision@10 for cross-lingual sentence retrieval (the cvit-mkb-clsr subset).

    Both inputs are (n, d) array-likes of sentence vectors where row ``i`` of one
    side is the translation of row ``i`` of the other. Each English vector
    retrieves its 10 nearest Indian-language vectors by cosine distance after
    per-side mean-centering; the score is the fraction of rows whose true match
    appears in that top-10.

    The original definition was corrupted (duplicate ``__snake_case`` parameters
    — a SyntaxError — and dead obfuscated locals); the name is taken from the
    ``precision_at_aa`` call site in ``_compute`` below.
    """
    en_sentvecs = np.asarray(en_sentvecs, dtype=float)
    in_sentvecs = np.asarray(in_sentvecs, dtype=float)
    n = en_sentvecs.shape[0]
    # Mean-center each side independently before computing cosine distances.
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)
    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.arange(n)
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
    """IndicGLUE metric: accuracy, accuracy+F1, or precision@10 depending on the subset."""

    def _SCREAMING_SNAKE_CASE ( self: int) -> str:
        """Validate the configuration name and declare the metric's input features.

        cvit-mkb-clsr takes float32 sentence vectors; every other subset takes
        int64 labels (and uses numpy formatting).
        """
        if self.config_name not in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "cvit-mkb-clsr",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
            "wiki-ner",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                "[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
                "\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
                "\"wiki-ner\"]")
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                    "references": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                }) , codebase_urls=[] , reference_urls=[] , format="numpy" if self.config_name != "cvit-mkb-clsr" else None , )

    def _SCREAMING_SNAKE_CASE ( self: List[str] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Optional[Any]) -> int:
        """Dispatch to the right scoring function for the configured subset.

        cvit-mkb-clsr -> precision@10, wiki-ner -> accuracy+F1, all other valid
        subsets -> plain accuracy; anything else raises KeyError.
        """
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_aa(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_fa(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                "[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
                "\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
                "\"wiki-ner\"]")
"""simple docstring"""
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __A ( _SCREAMING_SNAKE_CASE, unittest.TestCase ):
    """Unit tests for ``TransfoXLTokenizer``: vocab fixtures, lower-casing, moses-style
    number/punctuation splitting, and ``move_added_token``.

    NOTE(review): the file was mechanically obfuscated — every local assignment target
    is ``a`` and the mixin attributes share one name (``__lowerCAmelCase``), so reads
    like ``self.vocab_file``, ``vocab_tokens`` and ``original_len`` reference names that
    are never bound; restore the original identifiers before running.
    """

    __lowerCAmelCase = TransfoXLTokenizer
    __lowerCAmelCase = False
    __lowerCAmelCase = False

    def SCREAMING_SNAKE_CASE ( self ) -> int:
        # Write a small fixture vocabulary file into the mixin's temp dir.
        super().setUp()

        a =[
            '''<unk>''',
            '''[CLS]''',
            '''[SEP]''',
            '''want''',
            '''unwanted''',
            '''wa''',
            '''un''',
            '''running''',
            ''',''',
            '''low''',
            '''l''',
        ]
        a =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )

    def SCREAMING_SNAKE_CASE ( self , **__A ) -> List[Any]:
        # Factory used by the common tests: always lower-cases.
        a =True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **__A )

    def SCREAMING_SNAKE_CASE ( self , __A ) -> Any:
        # (input, expected) pair for the mixin's round-trip test.
        a ='''<unk> UNwanted , running'''
        a ='''<unk> unwanted, running'''
        return input_text, output_text

    def SCREAMING_SNAKE_CASE ( self ) -> Dict:
        # Tokenize with the fixture vocab and check token ids.
        a =TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=__A )
        a =tokenizer.tokenize('''<unk> UNwanted , running''' )
        self.assertListEqual(__A , ['''<unk>''', '''unwanted''', ''',''', '''running'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , [0, 4, 8, 7] )

    def SCREAMING_SNAKE_CASE ( self ) -> int:
        # lower_case=True folds case and strips surrounding whitespace.
        a =TransfoXLTokenizer(lower_case=__A )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo ! how  \n Are yoU ?  ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )

    def SCREAMING_SNAKE_CASE ( self ) -> Any:
        # lower_case=False preserves the original casing.
        a =TransfoXLTokenizer(lower_case=__A )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo ! how  \n Are yoU ?  ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )

    def SCREAMING_SNAKE_CASE ( self ) -> int:
        # Moses-style punctuation/number splitting (@-@, @,@, @.@) must round-trip
        # through convert_tokens_to_string.
        a =TransfoXLTokenizer(lower_case=__A )
        a ='''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'''
        a =[
            '''Hello''',
            '''(''',
            '''bracket''',
            ''')''',
            '''and''',
            '''side''',
            '''@-@''',
            '''scrolled''',
            '''[''',
            '''and''',
            ''']''',
            '''Henry''',
            '''\'s''',
            '''$''',
            '''5''',
            '''@,@''',
            '''000''',
            '''with''',
            '''3''',
            '''@.@''',
            '''34''',
            '''m''',
            '''.''',
            '''What''',
            '''\'s''',
            '''up''',
            '''!''',
            '''?''',
        ]
        self.assertListEqual(tokenizer.tokenize(__A ) , __A )
        self.assertEqual(tokenizer.convert_tokens_to_string(__A ) , __A )

    def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        # move_added_token must relocate (not duplicate) a freshly added token.
        a =self.get_tokenizer()
        a =len(__A )
        tokenizer.add_tokens(['''new1''', '''new2'''] )
        tokenizer.move_added_token('''new1''' , 1 )
        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(__A ) , original_len + 2 )
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode('''new1''' ) , [1] )
        self.assertEqual(tokenizer.decode([1] ) , '''new1''' )
"""simple docstring"""
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal (fractional) part of *number*.

    When *digit_amount* is positive the fractional part is rounded to that many
    digits; for ``digit_amount <= 0`` the raw fractional part is returned. The
    result keeps the sign of *number* (e.g. ``-14.789 -> -0.789``).

    The original definition was corrupted: both parameters were named
    ``__snake_case`` (a SyntaxError); the name and parameter roles are
    reconstructed from the demo calls under ``__main__`` below.

    >>> decimal_isolate(35.345, 1)
    0.3
    """
    fractional = number - int(number)
    if digit_amount > 0:
        return round(fractional, digit_amount)
    return fractional
if __name__ == "__main__":
    # Demonstrate decimal_isolate on positive, negative and zero inputs with
    # varying rounding precision (same values/order as the original demo).
    samples = (
        (1.53, 0),
        (35.345, 1),
        (35.345, 2),
        (35.345, 3),
        (-14.789, 3),
        (0, 2),
        (-14.123, 1),
        (-14.123, 2),
        (-14.123, 3),
    )
    for value, digits in samples:
        print(decimal_isolate(value, digits))
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class __lowerCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
    """Common-mixin unit tests for ``FlaxAutoencoderKL`` with a tiny config.

    NOTE(review): local assignment targets were mechanically renamed to
    ``_lowerCAmelCase``, so reads like ``batch_size`` and ``prng_key`` reference
    names that are never bound; restore the original identifiers before running.
    """

    __lowerCamelCase = FlaxAutoencoderKL

    @property
    def snake_case ( self ):
        """A 4x3x32x32 uniform-random image batch plus the PRNG key the Flax model expects."""
        _lowerCAmelCase = 4
        _lowerCAmelCase = 3
        _lowerCAmelCase = (32, 32)

        _lowerCAmelCase = jax.random.PRNGKey(0 )
        _lowerCAmelCase = jax.random.uniform(_snake_case , ((batch_size, num_channels) + sizes) )

        return {"sample": image, "prng_key": prng_key}

    def snake_case ( self ):
        """Tiny AutoencoderKL init kwargs + matching dummy inputs for the mixin's tests."""
        _lowerCAmelCase = {
            """block_out_channels""": [32, 64],
            """in_channels""": 3,
            """out_channels""": 3,
            """down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
            """up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
            """latent_channels""": 4,
        }
        _lowerCAmelCase = self.dummy_input
        return init_dict, inputs_dict
| 82 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=__SCREAMING_SNAKE_CASE ):
    """Import-time placeholder for an object that needs the ``torch`` and ``torchsde`` backends.

    Instantiating it, or calling either classmethod (presumably the usual
    ``from_config`` / ``from_pretrained`` pair — confirm against diffusers'
    dummy-object template), raises a helpful ImportError via ``requires_backends``.
    """

    # Backends this dummy stands in for (originally the `_backends` class attribute).
    SCREAMING_SNAKE_CASE = ['torch', 'torchsde']

    def __init__( self: int , *_SCREAMING_SNAKE_CASE: Optional[Any] , **_SCREAMING_SNAKE_CASE: str) -> Dict:
        """Always raises: the required backends are not installed."""
        requires_backends(self , ["torch", "torchsde"])

    @classmethod
    def _SCREAMING_SNAKE_CASE ( cls: Optional[Any] , *_SCREAMING_SNAKE_CASE: Optional[int] , **_SCREAMING_SNAKE_CASE: Optional[int]) -> Optional[Any]:
        """Always raises: the required backends are not installed."""
        requires_backends(cls , ["torch", "torchsde"])

    @classmethod
    def _SCREAMING_SNAKE_CASE ( cls: Dict , *_SCREAMING_SNAKE_CASE: List[Any] , **_SCREAMING_SNAKE_CASE: Any) -> Optional[int]:
        """Always raises: the required backends are not installed."""
        requires_backends(cls , ["torch", "torchsde"])
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase__ :
    """Config/input factory for the ViTMAE tests (the usual ``ViTMAEModelTester`` role).

    NOTE(review): assignment targets were mechanically renamed to ``_UpperCamelCase``,
    so the ``__init__`` never actually stores its arguments and later reads like
    ``self.batch_size`` reference attributes that are never set; restore the
    original ``self.<name> = <arg>`` bindings before running.
    """

    def __init__( self : Any ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : List[str]=13 ,lowerCamelCase__ : Union[str, Any]=30 ,lowerCamelCase__ : Dict=2 ,lowerCamelCase__ : int=3 ,lowerCamelCase__ : Tuple=True ,lowerCamelCase__ : Any=True ,lowerCamelCase__ : Optional[Any]=32 ,lowerCamelCase__ : List[str]=5 ,lowerCamelCase__ : Optional[Any]=4 ,lowerCamelCase__ : Optional[int]=37 ,lowerCamelCase__ : Dict="gelu" ,lowerCamelCase__ : List[Any]=0.1 ,lowerCamelCase__ : Dict=0.1 ,lowerCamelCase__ : Tuple=10 ,lowerCamelCase__ : str=0.0_2 ,lowerCamelCase__ : List[Any]=3 ,lowerCamelCase__ : int=0.6 ,lowerCamelCase__ : Tuple=None ,):
        '''Store the tiny-model hyperparameters and derive the masked sequence length.'''
        _UpperCamelCase : Dict = parent
        _UpperCamelCase : Tuple = batch_size
        _UpperCamelCase : Optional[int] = image_size
        _UpperCamelCase : Tuple = patch_size
        _UpperCamelCase : Optional[Any] = num_channels
        _UpperCamelCase : int = is_training
        _UpperCamelCase : int = use_labels
        _UpperCamelCase : Optional[Any] = hidden_size
        _UpperCamelCase : Any = num_hidden_layers
        _UpperCamelCase : int = num_attention_heads
        _UpperCamelCase : List[Any] = intermediate_size
        _UpperCamelCase : List[Any] = hidden_act
        _UpperCamelCase : Optional[Any] = hidden_dropout_prob
        _UpperCamelCase : str = attention_probs_dropout_prob
        _UpperCamelCase : int = type_sequence_label_size
        _UpperCamelCase : str = initializer_range
        _UpperCamelCase : List[Any] = mask_ratio
        _UpperCamelCase : Tuple = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        _UpperCamelCase : Optional[int] = (image_size // patch_size) ** 2
        _UpperCamelCase : int = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )

    def UpperCamelCase_ ( self : List[str] ):
        '''Build (config, pixel_values[, labels]) for one test invocation.'''
        _UpperCamelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        _UpperCamelCase : int = None
        if self.use_labels:
            _UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )

        _UpperCamelCase : Dict = self.get_config()

        return config, pixel_values, labels

    def UpperCamelCase_ ( self : int ):
        '''Return a ViTMAEConfig assembled from the stored hyperparameters.'''
        return ViTMAEConfig(
            image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=lowerCamelCase__ ,initializer_range=self.initializer_range ,mask_ratio=self.mask_ratio ,)

    def UpperCamelCase_ ( self : Union[str, Any] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : Dict ):
        '''Forward pass through the bare ViTMAEModel; check the hidden-state shape.'''
        _UpperCamelCase : Optional[int] = ViTMAEModel(config=lowerCamelCase__ )
        model.to(lowerCamelCase__ )
        model.eval()
        _UpperCamelCase : Any = model(lowerCamelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )

    def UpperCamelCase_ ( self : Tuple ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : str ,lowerCamelCase__ : int ):
        '''Forward pass through ViTMAEForPreTraining; also exercises 1-channel (greyscale) input.'''
        _UpperCamelCase : int = ViTMAEForPreTraining(lowerCamelCase__ )
        model.to(lowerCamelCase__ )
        model.eval()
        _UpperCamelCase : List[str] = model(lowerCamelCase__ )
        _UpperCamelCase : List[str] = (self.image_size // self.patch_size) ** 2
        _UpperCamelCase : List[str] = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )

        # test greyscale images
        _UpperCamelCase : int = 1
        _UpperCamelCase : Dict = ViTMAEForPreTraining(lowerCamelCase__ )
        model.to(lowerCamelCase__ )
        model.eval()
        _UpperCamelCase : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        _UpperCamelCase : str = model(lowerCamelCase__ )
        _UpperCamelCase : Optional[int] = self.patch_size**2
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )

    def UpperCamelCase_ ( self : Dict ):
        '''Adapter for the common-test mixin: (config, {"pixel_values": ...}).'''
        _UpperCamelCase : Optional[Any] = self.prepare_config_and_inputs()
        _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[int] = config_and_inputs
        _UpperCamelCase : int = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class lowercase__ ( lowercase , lowercase , unittest.TestCase ):
    """Common-suite tests for ViTMAE (model + pretraining head).

    ViTMAE samples a random mask each forward pass, so several common tests that
    compare two forward passes are skipped, and ``check_pt_tf_models`` /
    ``test_save_load`` pin the noise explicitly to make runs reproducible.

    NOTE(review): assignment targets were mechanically renamed, so the class
    attributes all share one name and locals like ``model``/``out_a`` are read
    without ever being bound — restore the original identifiers before running.
    """

    lowercase__ = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    lowercase__ = {"""feature-extraction""": ViTMAEModel} if is_torch_available() else {}

    lowercase__ = False
    lowercase__ = False
    lowercase__ = False
    lowercase__ = False

    def UpperCamelCase_ ( self : Union[str, Any] ):
        '''Instantiate the model tester and a ConfigTester for ViTMAEConfig.'''
        _UpperCamelCase : List[str] = ViTMAEModelTester(self )
        _UpperCamelCase : List[str] = ConfigTester(self ,config_class=lowerCamelCase__ ,has_text_modality=lowerCamelCase__ ,hidden_size=37 )

    def UpperCamelCase_ ( self : List[Any] ):
        '''Run the shared config sanity checks.'''
        self.config_tester.run_common_tests()

    @unittest.skip(reason='ViTMAE does not use inputs_embeds' )
    def UpperCamelCase_ ( self : str ):
        '''Skipped: ViTMAE takes pixel_values only, no inputs_embeds.'''
        pass

    def UpperCamelCase_ ( self : Dict ):
        '''Input embeddings module exists; there is no output embedding layer.'''
        _UpperCamelCase , _UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            _UpperCamelCase : Any = model_class(lowerCamelCase__ )
            self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
            _UpperCamelCase : List[Any] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(lowerCamelCase__ ,nn.Linear ) )

    def UpperCamelCase_ ( self : Dict ):
        '''forward() signature must start with pixel_values.'''
        _UpperCamelCase , _UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            _UpperCamelCase : str = model_class(lowerCamelCase__ )
            _UpperCamelCase : Any = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _UpperCamelCase : int = [*signature.parameters.keys()]

            _UpperCamelCase : List[Any] = ['pixel_values']
            self.assertListEqual(arg_names[:1] ,lowerCamelCase__ )

    def UpperCamelCase_ ( self : str ):
        '''Shape checks for the bare model.'''
        _UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCamelCase__ )

    def UpperCamelCase_ ( self : List[Any] ):
        '''Shape checks for the pretraining (reconstruction) head.'''
        _UpperCamelCase : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*lowerCamelCase__ )

    def UpperCamelCase_ ( self : Union[str, Any] ,lowerCamelCase__ : Any ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Optional[int] ):
        '''PT/TF equivalence needs identical masking, so inject a fixed noise tensor.'''
        # make masks reproducible
        np.random.seed(2 )

        _UpperCamelCase : int = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
        _UpperCamelCase : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        _UpperCamelCase : Any = torch.from_numpy(lowerCamelCase__ )

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        _UpperCamelCase : Optional[Any] = pt_noise

        super().check_pt_tf_models(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )

    def UpperCamelCase_ ( self : Tuple ):
        '''Save/reload round-trip with the RNG reseeded so both passes use the same mask.'''
        _UpperCamelCase , _UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            _UpperCamelCase : Union[str, Any] = model_class(lowerCamelCase__ )
            model.to(lowerCamelCase__ )
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2 )
            with torch.no_grad():
                _UpperCamelCase : Tuple = model(**self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ) )

            _UpperCamelCase : Any = outputs[0].cpu().numpy()
            _UpperCamelCase : List[str] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(lowerCamelCase__ )
                _UpperCamelCase : Any = model_class.from_pretrained(lowerCamelCase__ )
                model.to(lowerCamelCase__ )
                # make random mask reproducible
                torch.manual_seed(2 )
                with torch.no_grad():
                    _UpperCamelCase : Optional[Any] = model(**self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ) )

                # Make sure we don't have nans
                _UpperCamelCase : str = after_outputs[0].cpu().numpy()
                _UpperCamelCase : Optional[int] = 0
                _UpperCamelCase : Dict = np.amax(np.abs(out_a - out_a ) )
                self.assertLessEqual(lowerCamelCase__ ,1E-5 )

    @unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n            to get deterministic results.' )
    def UpperCamelCase_ ( self : List[Any] ):
        '''Skipped: output is randomized per forward pass.'''
        pass

    @unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n            to get deterministic results.' )
    def UpperCamelCase_ ( self : Optional[int] ):
        '''Skipped: output is randomized per forward pass.'''
        pass

    @unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n            to get deterministic results.' )
    def UpperCamelCase_ ( self : Union[str, Any] ):
        '''Skipped: output is randomized per forward pass.'''
        pass

    @unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
    def UpperCamelCase_ ( self : List[Any] ):
        '''Skipped: output is randomized per forward pass.'''
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def UpperCamelCase_ ( self : Tuple ):
        '''Skipped pending a smaller common-test model.'''
        pass

    @slow
    def UpperCamelCase_ ( self : str ):
        '''Loading the first published checkpoint must succeed.'''
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _UpperCamelCase : int = ViTMAEModel.from_pretrained(lowerCamelCase__ )
            self.assertIsNotNone(lowerCamelCase__ )
def A__ ( ):
    """Load the standard COCO two-cats fixture image used by the integration tests.

    Bug fixed: the original assigned ``Image.open(...)`` to a junk name
    (``_UpperCamelCase``) but returned ``image``, which was never bound — a
    guaranteed NameError at call time.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class lowercase__ ( unittest.TestCase ):
@cached_property
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
_UpperCamelCase : str = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' ).to(lowerCamelCase__ )
_UpperCamelCase : Any = self.default_image_processor
_UpperCamelCase : List[str] = prepare_img()
_UpperCamelCase : List[str] = image_processor(images=lowerCamelCase__ ,return_tensors='pt' ).to(lowerCamelCase__ )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_UpperCamelCase : Any = ViTMAEConfig()
_UpperCamelCase : str = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_UpperCamelCase : Any = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
_UpperCamelCase : Any = model(**lowerCamelCase__ ,noise=torch.from_numpy(lowerCamelCase__ ).to(device=lowerCamelCase__ ) )
# verify the logits
_UpperCamelCase : str = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape ,lowerCamelCase__ )
_UpperCamelCase : List[str] = torch.tensor(
[[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] ,expected_slice.to(lowerCamelCase__ ) ,atol=1E-4 ) )
| 83 |
"""simple docstring"""
def _lowercase ( ) -> int:
return 1
def _lowercase ( __snake_case ) -> int:
return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def _lowercase ( __snake_case ) -> int:
return 0 if x < 0 else five_pence(x - 5 ) + two_pence(__snake_case )
def _lowercase ( __snake_case ) -> int:
return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(__snake_case )
def _lowercase ( __snake_case ) -> int:
return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(__snake_case )
def _lowercase ( __snake_case ) -> int:
return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(__snake_case )
def _lowercase ( __snake_case ) -> int:
return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(__snake_case )
def _lowercase ( __snake_case ) -> int:
return 0 if x < 0 else two_pound(x - 200 ) + one_pound(__snake_case )
def _lowercase ( __snake_case = 200 ) -> int:
return two_pound(__snake_case )
if __name__ == "__main__":
print(solution(int(input().strip()))) | 269 | 0 |
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module) -> bool:
    '''
    Return True when *module* is a ``torch.compile`` product (an ``OptimizedModule``).

    Always False on torch < 2.0.0, where ``torch._dynamo`` does not exist.

    Bug fixed: the obfuscated original tested ``hasattr(<argument>, "_dynamo")``,
    but the attribute being guarded is ``torch._dynamo`` (used on the next line),
    so the hasattr must target ``torch``. The function name is restored from the
    ``is_compiled_module(...)`` call site below.
    '''
    if is_torch_version("""<""" , """2.0.0""" ) or not hasattr(torch , """_dynamo""" ):
        return False
    return isinstance(module , torch._dynamo.eval_frame.OptimizedModule )
def _snake_case ( model , keep_fpaa_wrapper: bool = True ):
    '''
    Unwrap *model* from its parallel / compiled containers and return the inner module.

    Strips ``DistributedDataParallel`` / ``DataParallel`` (and ``DeepSpeedEngine``
    when deepspeed is installed) wrappers; a ``torch.compile`` shell is detected via
    ``is_compiled_module``, peeled to ``_orig_mod`` for unwrapping, and re-attached
    before returning. With ``keep_fpaa_wrapper=False`` the mixed-precision forward
    wrapper is unwound (following ``__wrapped__`` back to ``_original_forward``) and
    any transformer-engine conversion is undone.

    NOTE(review): the original def was corrupted — both parameters shared one name
    (a SyntaxError) and assignments targeted junk locals. The signature is
    reconstructed from the body's reads (``model``, ``keep_fpaa_wrapper``); confirm
    against accelerate's ``extract_model_from_parallel``.
    '''
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model , options):
        model = model.module

    if not keep_fpaa_wrapper:
        forward = getattr(model , """forward""" )
        original_forward = model.__dict__.pop("""_original_forward""" , None )
        if original_forward is not None:
            # Walk back through decorator layers until we reach the pre-wrap forward.
            while hasattr(forward , """__wrapped__""" ):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model , """_converted_to_transformer_engine""" , False ):
            convert_model(model , to_transformer_engine=False )

    if is_compiled:
        # Re-attach the unwrapped module inside the compiled shell and return the shell.
        compiled_model._orig_mod = model
        model = compiled_model

    return model
def wait_for_everyone():
    """Synchronization barrier: block until every process in the PartialState arrives here."""
    PartialState().wait_for_everyone()
def save(obj, f):
    """Save `obj` to `f`, distribution-aware.

    On TPU, defer to `xm.save` (handles device sync); otherwise only the main local
    process writes, via `torch.save`, to avoid concurrent writers.
    """
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)
@contextmanager
def patch_environment(**kwargs):
    """Temporarily export upper-cased environment variables, removing them on exit.

    Values are stringified because `os.environ` only stores strings.
    """
    for key, value in kwargs.items():
        # BUG FIX: the original bound str(value) to a throwaway local and never touched
        # the environment, yet still deleted the keys afterwards. Actually export them.
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def get_pretty_name(obj):
    """Best human-readable name for `obj`: __qualname__, then __name__, then str().

    Instances without either attribute fall back to their class's name.
    """
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)
def merge_dicts(source, destination):
    """Recursively merge `source` into `destination` in place and return `destination`.

    Nested dicts are merged key-by-key; any other value overwrites the target entry.
    """
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination
def is_port_in_use(port=None):
    """Return True when something is accepting connections on localhost:`port`.

    Defaults to 29500, the conventional distributed-training rendezvous port.
    """
    if port is None:
        port = 29_500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
| 84 |
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class A__(TestCase):
    """Repo-wide hygiene checks over the dataset scripts under ./datasets."""

    def _no_encoding_on_file_open(self, file_path: str):
        """Return a match for an `open(...)` call whose line lacks an encoding/binary mode, else None."""
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, file_path: str):
        """Return a match for a bare `print(` call (comments/strings excluded), else None."""
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""")
'''simple docstring'''
import heapq
import sys
import numpy as np
# Grid positions are (x, y) integer pairs; this alias is used throughout the module.
TPos = tuple[int, int]
class PriorityQueue:
    """Min-priority queue with membership tracking and in-place priority updates.

    Backed by `heapq` plus a companion set so `put` can detect whether an item
    is already queued (and update its priority instead of duplicating it).
    """

    def __init__(self):
        self.elements = []  # heap of (priority, item) pairs
        self.set = set()    # items currently in the heap

    def minkey(self):
        """Smallest priority currently stored, or +inf when the queue is empty."""
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        """Insert `item`, or re-prioritize it if already present."""
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update: pop entries until `item` surfaces, re-push with the new priority
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        """Delete `item` from the queue if present (no-op otherwise)."""
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        """Peek at the item with the smallest priority without removing it."""
        return self.elements[0][1]

    def top(self):
        """Pop and return the (priority, item) pair with the smallest priority."""
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
def consistent_heuristic(pa, pb):
    """Euclidean distance between two grid positions (the consistent heuristic)."""
    a = np.array(pa)
    b = np.array(pb)
    return np.linalg.norm(a - b)
def heuristic_a(p, goal):
    """Inadmissible heuristic: consistent heuristic integer-divided by the global counter `t`."""
    return consistent_heuristic(p, goal) // t
def heuristic_2(p, goal):
    """Manhattan distance between `p` and `goal`."""
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])
def key(start, i, goal, g_function):
    """Priority of `start` in queue `i`: g-cost plus weighted heuristic `i` to `goal`.

    `Wa` and `heuristics` are module-level hyperparameters.
    """
    ans = g_function[start] + Wa * heuristics[i](start, goal)
    return ans
def do_something(back_pointer, goal, start):
    """Render the n x n grid with obstacles and the found path, print the path, then exit.

    Reads module globals `n` and `blocks`. Terminates the program via sys.exit().
    """
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            # grid rows are printed top-down, so flip the y axis
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p):
    """Return True when position `p` lies inside the n x n grid (global `n`)."""
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer):
    """Remove `s` from every open list and relax its four grid neighbours.

    Newly seen neighbours get g = +inf; improved neighbours are re-queued in the
    anchor queue and, when admissible w.r.t. Wa, in the inadmissible queues too.
    Reads module globals `n_heuristic`, `blocks`, `goal`, `Wa`.
    """
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= Wa * key(
                                neighbours, 0, goal, g_function
                            ):
                                # NOTE(review): the source indexes open_list with `j`
                                # (not `var`) here; preserved as-is.
                                open_list[j].put(
                                    neighbours, key(neighbours, var, goal, g_function)
                                )
def make_common_ground():
    """Build the list of blocked grid cells shared by the demo obstacle layouts."""
    some_list = []
    # upper-left block
    some_list.extend((x, y) for x in range(1, 5) for y in range(1, 6))
    # short wall segment
    some_list.extend((x, 17) for x in range(15, 20))
    # large middle block
    some_list.extend((x, y) for x in range(10, 19) for y in range(1, 15))
    # L block
    some_list.extend((x, y) for x in range(1, 4) for y in range(12, 19))
    some_list.extend((x, y) for x in range(3, 13) for y in range(16, 19))
    return some_list
# Heuristic table consumed by `key`: index 0 must be the consistent heuristic.
heuristics = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}

# Vertical wall of blocked cells at y = 1, x = 0..19.
blocks_blk = [
    (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1),
    (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1),
]
blocks_all = make_common_ground()

# Active obstacle set used by the search.
blocks = blocks_blk

# hyper parameters
Wa = 1  # inflation weight shared by the anchor and inadmissible queues
n = 20  # grid is n x n
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1  # global expansion counter, incremented inside multi_a_star
def multi_a_star(start, goal, n_heuristic):
    """Multi-heuristic A*: one anchor queue plus n_heuristic-1 inadmissible queues.

    On success, delegates to `do_something` (which prints the path and exits);
    otherwise prints the explored grid. Reads module globals `Wa`, `n`, `blocks`, `t`.
    """
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= Wa * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    # NOTE(review): the source unpacked two values from top_show() here,
                    # but top_show() returns a single position; a single target is used.
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s, i, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s)
                        expand_state(
                            get_s, 0, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer,
                        )
                        close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")


if __name__ == "__main__":
    multi_a_star(start, goal, n_heuristic)
| 85 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'openai/whisper-base': 'https://huggingface.co/openai/whisper-base/resolve/main/config.json',
}

# Token ids suppressed during generation because they never correspond to speech
# (punctuation-like and special tokens), for the English and multilingual vocabularies.
# fmt: off
NON_SPEECH_TOKENS = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
    705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
    1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
    4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
    11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
    17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
    34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
NON_SPEECH_TOKENS_MULTI = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
    893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
    3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
    7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
    14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
    22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
    42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
# fmt: on
class WhisperConfig(PretrainedConfig):
    """Configuration for a Whisper encoder-decoder speech model.

    Parameter names are reconstructed from the attribute assignments in the body;
    defaults match the `openai/whisper-base`-style tiny configuration.
    """

    model_type = 'whisper'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(
        self,
        vocab_size=51_865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50_257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50_256,
        bos_token_id=50_256,
        eos_token_id=50_256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50_256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class WhisperOnnxConfig(OnnxSeqaSeqConfigWithPast):
    """ONNX export configuration for Whisper (encoder audio features + decoder ids)."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec; decoder_input_ids collapses to batch-only when using past."""
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ])
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework=None,
        sampling_rate: int = 22_050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        """Build dummy audio features (via the feature extractor) and decoder ids for export."""
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )

        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        # The decoder sees half the encoder sequence when caching past key values.
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework)

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating exported model outputs."""
        return 1e-3
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class A__(ProcessorMixin):
    """Processor pairing an image processor with a tokenizer.

    Also provides `tokenajson`, which parses a `<s_key>value</s_key>`-style token
    sequence (Donut-style) into a JSON-like structure.
    """

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'AutoImageProcessor'
    tokenizer_class = 'AutoTokenizer'

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning, )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """Process images and/or text; when both are given, attach tokenized labels."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop('images', None)
        text = kwargs.pop('text', None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError('You need to specify either an `images` or `text` input to process.')

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings['input_ids']
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Deprecated: temporarily route __call__ to the tokenizer for label processing."""
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your images inputs, or in a separate call.' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def tokenajson(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a `<s_key>value</s_key>` token sequence into a dict/list structure.

        Leaf values are split on `<sep/>`; categorical special tokens in `added_vocab`
        are stripped of their angle-bracket wrapper.
        """
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r'<s_(.*?)>', tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                # Unterminated key: drop the opening tag and keep scanning.
                tokens = tokens.replace(start_token, '')
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.tokenajson(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r'<sep/>'):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:], is_inner_value=is_inner_value, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        """Deprecated alias for `image_processor_class`."""
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        """Deprecated alias for `image_processor`."""
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', FutureWarning, )
        return self.image_processor
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

# Timing results are written next to this script under results/<script-name>.json
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def read(dataset, length):
    """Read `length` examples one at a time (timed by @get_duration)."""
    for i in range(length):
        _ = dataset[i]
@get_duration
def read_batch(dataset, length, batch_size):
    """Read `length` examples in slices of `batch_size` (timed by @get_duration)."""
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]
@get_duration
def read_formatted(dataset, type, length):
    """Read `length` examples with the dataset formatted as `type` (timed by @get_duration)."""
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]
@get_duration
def read_formatted_batch(dataset, type, length, batch_size):
    """Batched read with the dataset formatted as `type` (timed by @get_duration)."""
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    """Generate a synthetic dataset, time each read strategy (before and after
    shuffling), and dump the timings as JSON to RESULTS_FILE_PATH."""
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES, seq_shapes={"list": (100,)},)
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)
        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs)
    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
# Script entry point: run the read-speed benchmark and dump timings to JSON.
if __name__ == "__main__": # useful to run the profiler
    benchmark_iterating()
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class snake_case_(unittest.TestCase):
    """Tests for TvltProcessor: the pairing of TvltImageProcessor and TvltFeatureExtractor."""

    def setUp(self):
        # Checkpoint the components are loaded from, plus a scratch dir for save/load tests.
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([1_20_00])

        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1E-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        images = np.ones([3, 2_24, 2_24])

        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1E-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([1_20_00])
        images = np.ones([3, 2_24, 2_24])

        inputs = processor(audio=audio, images=images)

        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        self.assertListEqual(
            processor.model_input_names, image_processor.model_input_names + feature_extractor.model_input_names, msg="`processor` and `image_processor`+`feature_extractor` model input names do not match", )
| 87 |
"""simple docstring"""
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester:
    """Builds small SqueezeBert configs/inputs and shape-checks every task head.

    Reconstructed from a mangled version: the class was named ``A__`` although the
    test class below instantiates ``SqueezeBertModelTester(self)``; all parameters
    shared the duplicated name ``_SCREAMING_SNAKE_CASE`` (invalid Python); ``__init__``
    bound its arguments to throwaway locals instead of ``self.*``; and every method
    shared one name, so only the last definition survived. Names are restored from
    their call sites in the sibling test class.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        # Store every knob on self so prepare_config_and_inputs / the
        # create_and_check_* helpers can read them.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups

    def prepare_config_and_inputs(self):
        """Random ids/masks/labels plus a config; shapes follow the self.* knobs."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """Tiny SqueezeBertConfig driven entirely by the tester's knobs."""
        return SqueezeBertConfig(
            embedding_size=self.hidden_size,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            attention_probs_dropout_prob=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            q_groups=self.q_groups,
            k_groups=self.k_groups,
            v_groups=self.v_groups,
            post_attention_groups=self.post_attention_groups,
            intermediate_groups=self.intermediate_groups,
            output_groups=self.output_groups,
        )

    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercise both the masked and the mask-less call paths.
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Replicate each example once per choice: (batch, choices, seq).
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Config plus the minimal kwargs dict used by the common-model tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Standard model-tester harness for SqueezeBert.

    Reconstructed from a mangled version: the bases were the undefined name
    ``__SCREAMING_SNAKE_CASE`` (they are the mixins imported at the top of the
    file), all test methods shared one name so only the last was ever collected
    by unittest, and the class was named ``A__`` — colliding with the
    integration-test class of the same name below.
    """

    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # One checkpoint is enough to prove weights load and map correctly.
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end check against the published squeezebert-mnli checkpoint.

    Renamed from ``A__`` so it no longer shadows the common test class above;
    undefined ``_SCREAMING_SNAKE_CASE`` references are replaced by the actual
    locals, and trailing dataset residue was removed from the last line.
    """

    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        # atol keeps the check robust to minor numeric drift across torch versions.
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Lazy-import table for the CTRL model family.  The mangled original bound this
# dict — and the optional model lists below — to throwaway names, so the
# `_import_structure` referenced at the bottom was undefined, and the
# `_LazyModule` result was assigned to a local instead of being installed into
# `sys.modules`, leaving the lazy machinery inert.
_import_structure = {
    "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
    "tokenization_ctrl": ["CTRLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ctrl"] = [
        "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CTRLForSequenceClassification",
        "CTRLLMHeadModel",
        "CTRLModel",
        "CTRLPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_ctrl"] = [
        "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCTRLForSequenceClassification",
        "TFCTRLLMHeadModel",
        "TFCTRLModel",
        "TFCTRLPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy module
    # below resolves them on first attribute access.
    from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
    from .tokenization_ctrl import CTRLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ctrl import (
            CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            CTRLForSequenceClassification,
            CTRLLMHeadModel,
            CTRLModel,
            CTRLPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_ctrl import (
            TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCTRLForSequenceClassification,
            TFCTRLLMHeadModel,
            TFCTRLModel,
            TFCTRLPreTrainedModel,
        )

else:
    import sys

    # Replace this module object with a lazy proxy so heavy backends are only
    # imported on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import os
from math import logaa
def _lowercase ( __snake_case = "base_exp.txt" ) -> int:
__lowerCAmelCase : float = 0
__lowerCAmelCase : Any = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(__snake_case ) ,__snake_case ) ) ):
__lowerCAmelCase , __lowerCAmelCase : List[str] = list(map(__snake_case ,line.split("," ) ) )
if x * logaa(__snake_case ) > largest:
__lowerCAmelCase : Tuple = x * logaa(__snake_case )
__lowerCAmelCase : Optional[Any] = i + 1
return result
if __name__ == "__main__":
print(solution()) | 269 | 0 |
"""Collinearity check for 3-D points: three points are collinear iff the cross
product of vectors AB and AC is (approximately) the zero vector."""

# Type aliases for readability; the mangled original bound both to a throwaway
# name while the signatures referenced the undefined ``Vectorad``.
Vectorad = tuple[float, float, float]
Pointad = tuple[float, float, float]


def create_vector(end_pointa: Pointad, end_pointb: Pointad) -> Vectorad:
    """Vector from ``end_pointa`` to ``end_pointb`` (component-wise b - a).

    The original subtracted each component from itself, always yielding zero.
    """
    x = end_pointb[0] - end_pointa[0]
    y = end_pointb[1] - end_pointa[1]
    z = end_pointb[2] - end_pointa[2]
    return (x, y, z)


def get_ad_vectors_cross(ab: Vectorad, ac: Vectorad) -> Vectorad:
    """Cross product ab × ac."""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vectorad, accuracy: int) -> bool:
    """True when every component rounds to zero at ``accuracy`` decimal places."""
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(point_a: Pointad, point_b: Pointad, point_c: Pointad, accuracy: int = 10) -> bool:
    """True when the three points lie on one straight line (AB × AC ≈ 0).

    All four functions originally shared the name ``__lowerCamelCase`` while
    this body called ``create_vector`` / ``get_ad_vectors_cross`` /
    ``is_zero_vector`` — names that were never defined; they are restored here.
    """
    ab = create_vector(point_a, point_b)
    ac = create_vector(point_a, point_c)
    return is_zero_vector(get_ad_vectors_cross(ab, ac), accuracy)


# Backward-compatible alias: the mangled source exposed the entry point under
# this name at module level.
__lowerCamelCase = are_collinear
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(__snake_case):
    """Pass iterables through unchanged; duplicate a scalar into a 2-tuple.

    Renamed to match its call sites (the attention-shape checks call
    ``to_atuple``); the original body also returned the undefined name ``x``
    instead of the argument.
    """
    if isinstance(__snake_case, collections.abc.Iterable):
        return __snake_case
    return (__snake_case, __snake_case)


# Backward-compatible alias for the old mangled name.
_lowercase = to_atuple
@require_flax
class VisionTextDualEncoderMixin:
    """Shared checks for Flax vision-text dual-encoder combinations.

    Subclasses provide the three factory hooks below; the ``check_*`` helpers
    verify embedding shapes, save/load round-trips, attention shapes and
    PyTorch<->Flax weight equivalence.  Reconstructed from a mangled version in
    which every parameter shared the duplicated name ``_SCREAMING_SNAKE_CASE``
    and method bodies referenced undefined locals.
    """

    def get_vision_text_model(self, config, text_config):
        # Hook: return (vision_model, text_model) for the given configs.
        pass

    def prepare_config_and_inputs(self):
        # Hook: return the kwargs dict consumed by the check_* helpers.
        pass

    def get_pretrained_model_and_inputs(self):
        # Hook: return a pretrained dual-encoder model plus sample inputs.
        pass

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        """Max-abs-difference comparison with a readable failure message."""
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = FlaxVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def check_pt_flax_equivalence(self, fx_model, pt_model, inputs_dict):
        """Outputs must agree between the Flax model and its PyTorch twin,
        including after save/load conversion in both directions."""
        pt_model.to(torch_device)
        pt_model.eval()

        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}

        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()

        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)

        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)

        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()

        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)

    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state

        self.check_pt_flax_equivalence(fx_model, pt_model, inputs_dict)

    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

        self.check_pt_flax_equivalence(fx_model, pt_model, inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")

        inputs_dict = config_inputs_dict

        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    """ViT vision tower + BERT text tower instantiation of the mixin checks.

    Reconstructed from a mangled version whose base class and boolean flags
    were the undefined name ``_SCREAMING_SNAKE_CASE``.
    """

    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    """CLIP vision tower + BERT text tower instantiation of the mixin checks.

    Reconstructed from a mangled version whose base class and boolean flags
    were the undefined name ``_SCREAMING_SNAKE_CASE``.
    """

    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class A__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Dict = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian" , logit_scale_init_value=1.0)
__lowerCAmelCase : str = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
__lowerCAmelCase : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
__lowerCAmelCase : Optional[int] = processor(
text=["una foto di un gatto", "una foto di un cane"] , images=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , return_tensors="np")
__lowerCAmelCase : List[str] = model(**_SCREAMING_SNAKE_CASE)
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
__lowerCAmelCase : List[str] = np.array([[1.228_4727, 0.310_4122]])
self.assertTrue(np.allclose(outputs.logits_per_image , _SCREAMING_SNAKE_CASE , atol=1e-3)) | 269 | 0 |
from manim import *
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = Rectangle(height=0.5 , width=0.5 )
__lowerCamelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__lowerCamelCase = [mem.copy() for i in range(6 )]
__lowerCamelCase = [mem.copy() for i in range(6 )]
__lowerCamelCase = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
__lowerCamelCase = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
__lowerCamelCase = VGroup(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
__lowerCamelCase = Text('CPU' , font_size=24 )
__lowerCamelCase = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase__ )
__lowerCamelCase = [mem.copy() for i in range(4 )]
__lowerCamelCase = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
__lowerCamelCase = Text('GPU' , font_size=24 )
__lowerCamelCase = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
gpu.move_to([-1, -1, 0] )
self.add(lowerCamelCase__ )
__lowerCamelCase = [mem.copy() for i in range(6 )]
__lowerCamelCase = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
__lowerCamelCase = Text('Model' , font_size=24 )
__lowerCamelCase = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
model.move_to([3, -1.0, 0] )
self.add(lowerCamelCase__ )
__lowerCamelCase = []
for i, rect in enumerate(lowerCamelCase__ ):
rect.set_stroke(lowerCamelCase__ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
__lowerCamelCase = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowerCamelCase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowerCamelCase__ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowerCamelCase__ , buff=0.0 )
self.add(lowerCamelCase__ )
cpu_targs.append(lowerCamelCase__ )
__lowerCamelCase = [mem.copy() for i in range(6 )]
__lowerCamelCase = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
__lowerCamelCase = Text('Loaded Checkpoint' , font_size=24 )
__lowerCamelCase = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , aligned_edge=lowerCamelCase__ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
__lowerCamelCase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__lowerCamelCase = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(lowerCamelCase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
__lowerCamelCase = MarkupText(
f"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase__ ) , Write(lowerCamelCase__ ) )
self.play(Write(lowerCamelCase__ , run_time=1 ) , Create(lowerCamelCase__ , run_time=1 ) )
__lowerCamelCase = []
__lowerCamelCase = []
for i, rect in enumerate(lowerCamelCase__ ):
__lowerCamelCase = fill.copy().set_fill(lowerCamelCase__ , opacity=0.7 )
target.move_to(lowerCamelCase__ )
first_animations.append(GrowFromCenter(lowerCamelCase__ , run_time=1 ) )
__lowerCamelCase = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(lowerCamelCase__ , run_time=1.5 ) )
self.play(*lowerCamelCase__ )
self.play(*lowerCamelCase__ )
self.wait()
| 90 |
"""simple docstring"""
from __future__ import annotations
def _lowercase ( __snake_case ,__snake_case ,__snake_case ,__snake_case ) -> list:
__lowerCAmelCase : Dict = []
__lowerCAmelCase , __lowerCAmelCase : Any = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
__lowerCAmelCase : int = result + left + right
return input_list
def _lowercase ( __snake_case ) -> list:
if len(__snake_case ) <= 1:
return input_list
__lowerCAmelCase : int = list(__snake_case )
# iteration for two-way merging
__lowerCAmelCase : Optional[int] = 2
while p <= len(__snake_case ):
# getting low, high and middle value for merge-sort of single list
for i in range(0 ,len(__snake_case ) ,__snake_case ):
__lowerCAmelCase : Union[str, Any] = i
__lowerCAmelCase : Tuple = i + p - 1
__lowerCAmelCase : Optional[Any] = (low + high + 1) // 2
__lowerCAmelCase : Any = merge(__snake_case ,__snake_case ,__snake_case ,__snake_case )
# final merge of last two parts
if p * 2 >= len(__snake_case ):
__lowerCAmelCase : Optional[Any] = i
__lowerCAmelCase : Union[str, Any] = merge(__snake_case ,0 ,__snake_case ,len(__snake_case ) - 1 )
break
p *= 2
return input_list
if __name__ == "__main__":
__snake_case : Union[str, Any] = input('Enter numbers separated by a comma:\n').strip()
if user_input == "":
__snake_case : Optional[int] = []
else:
__snake_case : int = [int(item.strip()) for item in user_input.split(',')]
print(iter_merge_sort(unsorted)) | 269 | 0 |
"""simple docstring"""
def _A (__a ) -> list:
"""simple docstring"""
if len(__a ) < 2:
return collection
def circle_sort_util(__a , __a , __a ) -> bool:
SCREAMING_SNAKE_CASE_ : Tuple = False
if low == high:
return swapped
SCREAMING_SNAKE_CASE_ : Any = low
SCREAMING_SNAKE_CASE_ : List[Any] = high
while left < right:
if collection[left] > collection[right]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = (
collection[right],
collection[left],
)
SCREAMING_SNAKE_CASE_ : str = True
left += 1
right -= 1
if left == right and collection[left] > collection[right + 1]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = (
collection[right + 1],
collection[left],
)
SCREAMING_SNAKE_CASE_ : List[Any] = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = low + int((high - low) / 2 )
SCREAMING_SNAKE_CASE_ : Optional[int] = circle_sort_util(__a , __a , __a )
SCREAMING_SNAKE_CASE_ : Optional[int] = circle_sort_util(__a , mid + 1 , __a )
return swapped or left_swap or right_swap
SCREAMING_SNAKE_CASE_ : str = True
while is_not_sorted is True:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = circle_sort_util(__a , 0 , len(__a ) - 1 )
return collection
if __name__ == "__main__":
UpperCAmelCase_ : int = input("""Enter numbers separated by a comma:\n""").strip()
UpperCAmelCase_ : Tuple = [int(item) for item in user_input.split(""",""")]
print(circle_sort(unsorted))
| 91 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class A__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> str:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small")
__lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("google/mt5-small")
__lowerCAmelCase : Tuple = tokenizer("Hello there" , return_tensors="np").input_ids
__lowerCAmelCase : Dict = tokenizer("Hi I am" , return_tensors="np").input_ids
__lowerCAmelCase : str = shift_tokens_right(_SCREAMING_SNAKE_CASE , model.config.pad_token_id , model.config.decoder_start_token_id)
__lowerCAmelCase : Optional[int] = model(_SCREAMING_SNAKE_CASE , decoder_input_ids=_SCREAMING_SNAKE_CASE).logits
__lowerCAmelCase : int = optax.softmax_cross_entropy(_SCREAMING_SNAKE_CASE , onehot(_SCREAMING_SNAKE_CASE , logits.shape[-1])).mean()
__lowerCAmelCase : List[str] = -(labels.shape[-1] * loss.item())
__lowerCAmelCase : str = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4) | 269 | 0 |
import argparse
import os
import re
import packaging.version
UpperCamelCase__ = """examples/"""
UpperCamelCase__ = {
"""examples""": (re.compile(R"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(R"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(R"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), R"""\1version=\"VERSION\","""),
"""doc""": (re.compile(R"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
UpperCamelCase__ = {
"""init""": """src/transformers/__init__.py""",
"""setup""": """setup.py""",
}
UpperCamelCase__ = """README.md"""
def _a ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str] ):
with open(SCREAMING_SNAKE_CASE_ , "r" , encoding="utf-8" , newline="\n" ) as f:
__lowerCAmelCase = f.read()
__lowerCAmelCase , __lowerCAmelCase = REPLACE_PATTERNS[pattern]
__lowerCAmelCase = replace.replace("VERSION" , SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = re_pattern.sub(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(SCREAMING_SNAKE_CASE_ )
def _a ( SCREAMING_SNAKE_CASE_ : List[Any] ):
for folder, directories, fnames in os.walk(SCREAMING_SNAKE_CASE_ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("research_projects" )
if "legacy" in directories:
directories.remove("legacy" )
for fname in fnames:
if fname.endswith(".py" ):
update_version_in_file(os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ , pattern="examples" )
def _a ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int]=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if not patch:
update_version_in_examples(SCREAMING_SNAKE_CASE_ )
def _a ( ):
__lowerCAmelCase = "🤗 Transformers currently provides the following architectures"
__lowerCAmelCase = "1. Want to contribute a new model?"
with open(SCREAMING_SNAKE_CASE_ , "r" , encoding="utf-8" , newline="\n" ) as f:
__lowerCAmelCase = f.readlines()
# Find the start of the list.
__lowerCAmelCase = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
__lowerCAmelCase = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("1." ):
__lowerCAmelCase = lines[index].replace(
"https://huggingface.co/docs/transformers/main/model_doc" , "https://huggingface.co/docs/transformers/model_doc" , )
index += 1
with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(SCREAMING_SNAKE_CASE_ )
def _a ( ):
with open(REPLACE_FILES["init"] , "r" ) as f:
__lowerCAmelCase = f.read()
__lowerCAmelCase = REPLACE_PATTERNS["init"][0].search(SCREAMING_SNAKE_CASE_ ).groups()[0]
return packaging.version.parse(SCREAMING_SNAKE_CASE_ )
def _a ( SCREAMING_SNAKE_CASE_ : List[Any]=False ):
__lowerCAmelCase = get_version()
if patch and default_version.is_devrelease:
raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" )
if default_version.is_devrelease:
__lowerCAmelCase = default_version.base_version
elif patch:
__lowerCAmelCase = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
__lowerCAmelCase = F"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
__lowerCAmelCase = input(F"""Which version are you releasing? [{default_version}]""" )
if len(SCREAMING_SNAKE_CASE_ ) == 0:
__lowerCAmelCase = default_version
print(F"""Updating version to {version}.""" )
global_version_update(SCREAMING_SNAKE_CASE_ , patch=SCREAMING_SNAKE_CASE_ )
if not patch:
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
def _a ( ):
__lowerCAmelCase = get_version()
__lowerCAmelCase = F"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
__lowerCAmelCase = current_version.base_version
# Check with the user we got that right.
__lowerCAmelCase = input(F"""Which version are we developing now? [{dev_version}]""" )
if len(SCREAMING_SNAKE_CASE_ ) == 0:
__lowerCAmelCase = dev_version
print(F"""Updating version to {version}.""" )
global_version_update(SCREAMING_SNAKE_CASE_ )
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
UpperCamelCase__ = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
| 92 |
"""simple docstring"""
import re
def _lowercase ( __snake_case ) -> str:
if len(re.findall("[ATCG]" ,__snake_case ) ) != len(__snake_case ):
raise ValueError("Invalid Strand" )
return dna.translate(dna.maketrans("ATCG" ,"TAGC" ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 269 | 0 |
'''simple docstring'''
import random
def snake_case_ ( __SCREAMING_SNAKE_CASE : list , __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
lowercase_ , lowercase_ , lowercase_ : Optional[int] = [], [], []
for element in data:
if element < pivot:
less.append(__SCREAMING_SNAKE_CASE )
elif element > pivot:
greater.append(__SCREAMING_SNAKE_CASE )
else:
equal.append(__SCREAMING_SNAKE_CASE )
return less, equal, greater
def snake_case_ ( __SCREAMING_SNAKE_CASE : list , __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
if index >= len(__SCREAMING_SNAKE_CASE ) or index < 0:
return None
lowercase_ : Dict = items[random.randint(0 , len(__SCREAMING_SNAKE_CASE ) - 1 )]
lowercase_ : Optional[int] = 0
lowercase_ , lowercase_ , lowercase_ : Optional[int] = _partition(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : str = len(__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = len(__SCREAMING_SNAKE_CASE )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# must be in larger
else:
return quick_select(__SCREAMING_SNAKE_CASE , index - (m + count) )
| 93 |
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = FunnelTokenizer
SCREAMING_SNAKE_CASE = FunnelTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> Optional[int]:
"""simple docstring"""
super().setUp()
__lowerCAmelCase : str = [
"<unk>",
"<cls>",
"<sep>",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
__lowerCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , **_SCREAMING_SNAKE_CASE: Union[str, Any]) -> int:
"""simple docstring"""
return FunnelTokenizer.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Any , **_SCREAMING_SNAKE_CASE: Any) -> str:
"""simple docstring"""
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: str) -> Any:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = "UNwant\u00E9d,running"
__lowerCAmelCase : str = "unwanted, running"
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Any = self.tokenizer_class(self.vocab_file)
__lowerCAmelCase : Any = tokenizer.tokenize("UNwant\u00E9d,running")
self.assertListEqual(_SCREAMING_SNAKE_CASE , ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE) , [7, 4, 5, 10, 8, 9])
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = self.get_tokenizers(do_lower_case=_SCREAMING_SNAKE_CASE)
for tokenizer in tokenizers:
__lowerCAmelCase : List[str] = tokenizer("UNwant\u00E9d,running")
__lowerCAmelCase : Optional[int] = len(inputs["input_ids"]) - 1
self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len)
__lowerCAmelCase : List[str] = tokenizer("UNwant\u00E9d,running" , "UNwant\u00E9d,running")
self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len + [1] * sentence_len) | 269 | 0 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
snake_case : Dict = '''facebook/wmt19-en-de'''
snake_case : List[Any] = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
snake_case : Optional[Any] = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
snake_case : Optional[Any] = FSMTForConditionalGeneration(config)
print(F"""num of params {tiny_model.num_parameters()}""")
# Test
snake_case : Union[str, Any] = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
snake_case : int = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
snake_case : Optional[int] = '''tiny-wmt19-en-de'''
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 94 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def _lowercase ( __snake_case = "laptop" ) -> DataFrame:
__lowerCAmelCase : str = F"""https://www.amazon.in/laptop/s?k={product}"""
__lowerCAmelCase : Union[str, Any] = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
"Accept-Language": "en-US, en;q=0.5",
}
__lowerCAmelCase : List[str] = BeautifulSoup(requests.get(__snake_case ,headers=__snake_case ).text )
# Initialize a Pandas dataframe with the column titles
__lowerCAmelCase : Dict = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" ,attrs={"class": "s-result-item", "data-component-type": "s-search-result"} ,) ,soup.find_all("div" ,attrs={"class": "a-row a-size-base a-color-base"} ) ,):
try:
__lowerCAmelCase : Any = item.ha.text
__lowerCAmelCase : Union[str, Any] = "https://www.amazon.in/" + item.ha.a["href"]
__lowerCAmelCase : Any = item.find("span" ,attrs={"class": "a-offscreen"} ).text
try:
__lowerCAmelCase : Union[str, Any] = item.find("span" ,attrs={"class": "a-icon-alt"} ).text
except AttributeError:
__lowerCAmelCase : Optional[Any] = "Not available"
try:
__lowerCAmelCase : Union[str, Any] = (
"₹"
+ item.find(
"span" ,attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
)
except AttributeError:
__lowerCAmelCase : Dict = ""
try:
__lowerCAmelCase : str = float(
(
(
float(product_mrp.strip("₹" ).replace("," ,"" ) )
- float(product_price.strip("₹" ).replace("," ,"" ) )
)
/ float(product_mrp.strip("₹" ).replace("," ,"" ) )
)
* 100 )
except ValueError:
__lowerCAmelCase : List[str] = float("nan" )
except AttributeError:
pass
__lowerCAmelCase : int = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
__lowerCAmelCase : Union[str, Any] = " "
__lowerCAmelCase : Union[str, Any] = " "
data_frame.index += 1
return data_frame
if __name__ == "__main__":
__snake_case : Any = 'headphones'
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""") | 269 | 0 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def _A ( SCREAMING_SNAKE_CASE : dict ):
"""simple docstring"""
return (data["data"], data["target"])
def _A ( SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : np.ndarray ):
"""simple docstring"""
a__ : Tuple =XGBClassifier()
classifier.fit(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return classifier
def _A ( ):
"""simple docstring"""
a__ : Optional[Any] =load_iris()
a__ , a__ : Union[str, Any] =data_handling(SCREAMING_SNAKE_CASE )
a__ , a__ , a__ , a__ : int =train_test_split(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , test_size=0.2_5 )
a__ : List[str] =iris["target_names"]
# Create an XGBoost Classifier from the training data
a__ : Union[str, Any] =xgboost(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , display_labels=SCREAMING_SNAKE_CASE , cmap="Blues" , normalize="true" , )
plt.title("Normalized Confusion Matrix - IRIS Dataset" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 95 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.6, 'eval_loss': 0.9},
},
{
'framework': 'tensorflow',
'script': 'run_tf.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.3, 'eval_loss': 0.9},
},
] )
class A__ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self: int) -> Tuple:
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="utf-8" , check=_SCREAMING_SNAKE_CASE , )
assert hasattr(self , "env")
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Any=1) -> Dict:
"""simple docstring"""
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-single""" , instance_count=_SCREAMING_SNAKE_CASE , instance_type=self.instance_type , debugger_hook_config=_SCREAMING_SNAKE_CASE , hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="py36" , )
def _SCREAMING_SNAKE_CASE ( self: str , _SCREAMING_SNAKE_CASE: List[Any]) -> Optional[Any]:
"""simple docstring"""
TrainingJobAnalytics(_SCREAMING_SNAKE_CASE).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""")
def _SCREAMING_SNAKE_CASE ( self: str) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Tuple = self.create_estimator()
# run training
estimator.fit()
# result dataframe
__lowerCAmelCase : Tuple = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
__lowerCAmelCase : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
__lowerCAmelCase : Union[str, Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__lowerCAmelCase : Tuple = (
Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds" , 99_9999)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
assert all(t <= self.results["eval_loss"] for t in eval_loss)
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , "w") as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , _SCREAMING_SNAKE_CASE) | 269 | 0 |
"""simple docstring"""
lowercase__ = range(2, 20 + 1)
lowercase__ = [10**k for k in range(ks[-1] + 1)]
lowercase__ = {}
def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
_lowerCamelCase : Optional[Any] = sum(a_i[j] for j in range(lowercase__ , len(lowercase__ ) ) )
_lowerCamelCase : Dict = sum(a_i[j] * base[j] for j in range(min(len(lowercase__ ) , lowercase__ ) ) )
_lowerCamelCase, _lowerCamelCase : Dict = 0, 0
_lowerCamelCase : List[str] = n - i
_lowerCamelCase : Optional[int] = memo.get(lowercase__ )
if sub_memo is not None:
_lowerCamelCase : Tuple = sub_memo.get(lowercase__ )
if jumps is not None and len(lowercase__ ) > 0:
# find and make the largest jump without going over
_lowerCamelCase : Optional[Any] = -1
for _k in range(len(lowercase__ ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
_lowerCamelCase : Dict = _k
break
if max_jump >= 0:
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Dict = jumps[max_jump]
# since the difference between jumps is cached, add c
_lowerCamelCase : Any = diff + c
for j in range(min(lowercase__ , len(lowercase__ ) ) ):
_lowerCamelCase, _lowerCamelCase : Tuple = divmod(lowercase__ , 10 )
if new_c > 0:
add(lowercase__ , lowercase__ , lowercase__ )
else:
_lowerCamelCase : List[Any] = []
else:
_lowerCamelCase : List[str] = {c: []}
_lowerCamelCase : str = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
_lowerCamelCase, _lowerCamelCase : Any = next_term(lowercase__ , k - 1 , i + dn , lowercase__ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
_lowerCamelCase, _lowerCamelCase : str = compute(lowercase__ , lowercase__ , i + dn , lowercase__ )
diff += _diff
dn += terms_jumped
_lowerCamelCase : Union[str, Any] = sub_memo[c]
# keep jumps sorted by # of terms skipped
_lowerCamelCase : Dict = 0
while j < len(lowercase__ ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(lowercase__ , (diff, dn, k) )
return (diff, dn)
def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
if i >= n:
return 0, i
if k > len(lowercase__ ):
a_i.extend([0 for _ in range(k - len(lowercase__ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
_lowerCamelCase : int = i
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = 0, 0, 0
for j in range(len(lowercase__ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
_lowerCamelCase : int = ds_c + ds_b
diff += addend
_lowerCamelCase : Optional[int] = 0
for j in range(lowercase__ ):
_lowerCamelCase : Dict = a_i[j] + addend
_lowerCamelCase, _lowerCamelCase : List[Any] = divmod(lowercase__ , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(lowercase__ , lowercase__ , lowercase__ )
return diff, i - start_i
def _snake_case ( lowercase__ , lowercase__ , lowercase__ ):
for j in range(lowercase__ , len(lowercase__ ) ):
_lowerCamelCase : int = digits[j] + addend
if s >= 10:
_lowerCamelCase, _lowerCamelCase : Dict = divmod(lowercase__ , 10 )
_lowerCamelCase : Union[str, Any] = addend // 10 + quotient
else:
_lowerCamelCase : Dict = s
_lowerCamelCase : Optional[Any] = addend // 10
if addend == 0:
break
while addend > 0:
_lowerCamelCase, _lowerCamelCase : Dict = divmod(lowercase__ , 10 )
digits.append(lowercase__ )
def _snake_case(n=10**15):
    """
    Return a(n) of the sequence a(1) = 1, a(m+1) = a(m) + digitsum(a(m)),
    using the memoized jump helper to skip blocks of terms.

    NOTE(review): the helper is called ``next_term`` here but is not defined
    under that name in this file — confirm against the upstream module.
    The obfuscated original discarded the helper's return tuple, leaving
    ``terms_jumped`` and ``a_n`` undefined; restored.
    """
    digits = [1]
    i = 1
    dn = 0  # terms jumped so far
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break
    # digits are stored little-endian; reassemble the integer
    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n


if __name__ == "__main__":
    # NOTE(review): ``solution`` is the upstream name of the function above
    # (obfuscated to ``_snake_case``) — confirm before running.
    print(F"{solution() = }")
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule

# Lazy-import structure: submodule name -> public names it exports.
# Restored name ``_import_structure`` — the _LazyModule call below requires it.
_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}

if TYPE_CHECKING:
    # Static imports for type checkers only; runtime goes through the lazy module.
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,  # was misspelled "OnnxSeqaSeqConfigWithPast"
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    # Replace this module with a lazy loader so submodules import on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
def a(string_a, string_b):
    """Return the Hamming distance between two equal-length strings.

    Raises:
        ValueError: if the strings have different lengths.
    """
    if len(string_a) != len(string_b):
        raise ValueError("String lengths must match!")
    # Count positions where the characters differ.  (The obfuscated original
    # named both parameters and both loop targets identically — a SyntaxError
    # on the signature and a comparison that could never be True.)
    count = 0
    for char_a, char_b in zip(string_a, string_b):
        if char_a != char_b:
            count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__snake_case : Optional[int] = logging.get_logger(__name__)
class A__(BackboneConfigMixin, PretrainedConfig):
    """Configuration for the MaskFormer Swin backbone.

    NOTE(review): the base classes were the undefined name
    ``__SCREAMING_SNAKE_CASE`` — restored to the mixins imported above.
    The obfuscated ``__init__`` repeated one parameter name (a SyntaxError)
    and bound every value to a throwaway local, so no attribute was ever set.
    """

    # restored from the two overwritten ``SCREAMING_SNAKE_CASE`` class attrs
    model_type = "maskformer-swin"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [F"""stage{idx}""" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
lowerCAmelCase__ : Optional[int] = logging.get_logger(__name__)
class snake_case(PretrainedConfig):
    """Configuration class for a UPerNet semantic-segmentation model.

    NOTE(review): base class restored from the undefined name
    ``__UpperCAmelCase`` to the ``PretrainedConfig`` imported above; the
    obfuscated ``__init__`` repeated one parameter name (a SyntaxError) and
    never set any instance attribute.
    """

    # restored: PretrainedConfig machinery expects this under ``model_type``
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
            backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage1', 'stage2', 'stage3', 'stage4'])
        elif isinstance(backbone_config, dict):
            # a plain dict config: resolve its concrete config class and parse it
            backbone_model_type = backbone_config.get('model_type')
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        """Serialize to a plain dict, nesting the backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 98 |
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class A__:
    """Track the peak RSS of the current process from a background thread.

    NOTE(review): all three methods of the obfuscated original shared one name
    (so only the last survived) and every ``self.`` write was bound to a
    throwaway local; restored to the ``peak_monitor``/``start``/``stop`` API
    that ``threading.Thread(target=self.peak_monitor)`` relies on.
    """

    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        """Busy-loop sampling RSS until ``stop`` clears the flag."""
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        """Spawn the daemon monitoring thread."""
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        """Stop monitoring, join the thread and return the peak RSS in bytes."""
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak
# NOTE(review): `PeakCPUMemory` is not defined in this file (the tracker
# class above is named `A__`) — confirm the intended class name before use.
__snake_case : Tuple = PeakCPUMemory()
def _lowercase():
    """Snapshot time, CPU RSS and per-GPU allocated memory, and start the CPU
    peak tracker.

    Returns a dict keyed by "time", "cpu" and each GPU index as a string.

    NOTE(review): the obfuscated original bound every measurement to a
    throwaway local, so the returned dict only contained "time"; restored.
    ``cpu_peak_tracker`` is expected to be the module-level tracker instance.
    """
    # Time
    measures = {"time": time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()
    return measures
def _lowercase(start_measures):
    """Return elapsed time (s) and CPU/GPU memory deltas (MiB) relative to the
    dict produced by the start-measure helper above.

    NOTE(review): restored the dict-key writes the obfuscated original lost.
    """
    # Time
    measures = {"time": time.time() - start_measures["time"]}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem (deltas converted from bytes to MiB)
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[F"""{i}-peak"""] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20
    return measures
def _lowercase ( __snake_case ,__snake_case ) -> Dict:
print(F"""{description}:""" )
print(F"""- Time: {measures['time']:.2f}s""" )
for i in range(torch.cuda.device_count() ):
print(F"""- GPU {i} allocated: {measures[str(__snake_case )]:.2f}MiB""" )
__lowerCAmelCase : Optional[Any] = measures[F"""{i}-peak"""]
print(F"""- GPU {i} peak: {peak:.2f}MiB""" )
print(F"""- CPU RAM allocated: {measures['cpu']:.2f}MiB""" )
print(F"""- CPU RAM peak: {measures['cpu-peak']:.2f}MiB""" ) | 269 | 0 |
# Small undirected demo graph used by the __main__ examples below.
# Restored name ``demo_graph`` — the call sites reference it by that name,
# but the obfuscated original bound it to a throwaway variable.
demo_graph = {
    """A""": ["""B""", """C""", """E"""],
    """B""": ["""A""", """D""", """E"""],
    """C""": ["""A""", """F""", """G"""],
    """D""": ["""B"""],
    """E""": ["""A""", """B""", """D"""],
    """F""": ["""C"""],
    """G""": ["""C"""],
}
def A_(graph, start, goal):
    """Breadth-first search: return the first (hence shortest) path from
    ``start`` to ``goal`` as a list of nodes.

    Returns ``[start]`` when start == goal and ``[]`` when no path exists.

    NOTE(review): restored the local-variable bindings the obfuscated
    original discarded (``explored``, ``queue``, ``path`` etc.).
    """
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []
def A_(graph, start, target):
    """Return the number of edges on the shortest path from ``start`` to
    ``target`` (BFS): ``0`` when they coincide, ``-1`` when either node is
    missing from ``graph`` or the target is unreachable.

    NOTE(review): restored the local bindings (``queue``, ``visited``,
    ``dist``, ``node``) the obfuscated original discarded.
    """
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
    # NOTE(review): `bfs_shortest_path`, `bfs_shortest_path_distance` and
    # `demo_graph` are not defined under those names in this file (both
    # functions above are named `A_`) — confirm against the upstream module.
    print(bfs_shortest_path(demo_graph, """G""", """D"""))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, """G""", """D"""))  # returns 4
| 99 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__snake_case : List[Any] = logging.get_logger(__name__)
__snake_case : Any = {
'speechbrain/m-ctc-t-large': 'https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json',
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class A__(PretrainedConfig):
    """Configuration for an M-CTC-T speech-recognition model.

    NOTE(review): base class restored from the undefined
    ``__SCREAMING_SNAKE_CASE``; the obfuscated ``__init__`` repeated one
    parameter name (a SyntaxError) and never set instance attributes, so the
    trailing validation could not even run.
    """

    # restored: PretrainedConfig machinery expects this under ``model_type``
    model_type = "mctct"

    def __init__(
        self,
        vocab_size=8065,
        hidden_size=1536,
        num_hidden_layers=36,
        intermediate_size=6144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)
        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                F"""but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, """
                F"""`config.num_conv_layers = {self.num_conv_layers}`."""
            )
"""simple docstring"""
def _lowerCAmelCase ( UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = {
"""^""": 3,
"""*""": 2,
"""/""": 2,
"""%""": 2,
"""+""": 1,
"""-""": 1,
} # Priority of each operator
__SCREAMING_SNAKE_CASE = len(UpperCamelCase_ ) if (len(UpperCamelCase_ ) > 7) else 7
# Print table header for output
print(
"""Symbol""".center(8 ) , """Stack""".center(UpperCamelCase_ ) , """Postfix""".center(UpperCamelCase_ ) , sep=""" | """ , )
print("""-""" * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(UpperCamelCase_ ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(UpperCamelCase_ ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(UpperCamelCase_ ) == 0:
stack.append(UpperCamelCase_ ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(UpperCamelCase_ ) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(UpperCamelCase_ ) # push x to stack
print(
x.center(8 ) , ("""""".join(UpperCamelCase_ )).ljust(UpperCamelCase_ ) , ("""""".join(UpperCamelCase_ )).ljust(UpperCamelCase_ ) , sep=""" | """ , ) # Output in tabular format
while len(UpperCamelCase_ ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
""" """.center(8 ) , ("""""".join(UpperCamelCase_ )).ljust(UpperCamelCase_ ) , ("""""".join(UpperCamelCase_ )).ljust(UpperCamelCase_ ) , sep=""" | """ , ) # Output in tabular format
return "".join(UpperCamelCase_ ) # return Postfix as str
def _lowerCAmelCase(infix):
    """Convert infix to prefix: reverse the expression, swap parentheses,
    convert to postfix, then reverse the result.

    NOTE(review): relies on ``infix_2_postfix``, which is not defined under
    that name in this file — confirm against the upstream module.
    """
    reversed_infix = list(infix[::-1])  # reverse the infix equation
    for i in range(len(reversed_infix)):
        if reversed_infix[i] == "(":
            reversed_infix[i] = ")"  # change "(" to ")"
        elif reversed_infix[i] == ")":
            reversed_infix[i] = "("  # change ")" to "("
    return (infix_2_postfix("".join(reversed_infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    # NOTE(review): ``infix_2_prefix`` is the upstream name of the function
    # above (obfuscated to ``_lowerCAmelCase``) — confirm before running.
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
# Total units of each of the 4 resource types held by the system (claim vector).
__snake_case : Optional[Any] = [8, 5, 9, 7]
# Resources currently allocated to each of the 5 processes (rows) per type (columns).
__snake_case : List[Any] = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
# Maximum resources each process may ever claim (same row/column layout).
__snake_case : Optional[int] = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class A__:
    """Banker's algorithm: decide whether all processes can run to completion
    without deadlock, printing the execution order found.

    NOTE(review): the obfuscated original named every method
    ``_SCREAMING_SNAKE_CASE`` (so only the last survived) and bound each
    constructor argument to a throwaway local, so the private-name references
    below (``self.__need`` etc.) could never resolve; restored.
    """

    def __init__(self, claim_vector, allocated_resources_table, maximum_claim_table):
        # claim_vector: total units of each resource type in the system
        # allocated_resources_table: per-process currently-held resources
        # maximum_claim_table: per-process maximum possible claims
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self):
        """Column-wise sum of currently allocated resources."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self):
        """Resources still free: claim vector minus everything allocated."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self):
        """Per-process outstanding need: maximum claim minus allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self):
        """Map each need list's position back to the need list itself."""
        return {self.__need().index(i): i for i in self.__need()}

    def main(self, **kwargs):
        """Run the safety algorithm, printing each process as it executes."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(F"""Process {process_number + 1} is executing.""")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        """Print the allocation and maximum-claim tables plus summary vectors."""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                F"""P{self.__allocated_resources_table.index(item) + 1}"""
                + " ".join(F"""{it:>8}""" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                F"""P{self.__maximum_claim_table.index(item) + 1}"""
                + " ".join(F"""{it:>8}""" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
# Restored name: `onnx_export` below branches on this flag, but the
# obfuscated assignment bound it to a throwaway variable.
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def UpperCamelCase(
    model,
    model_args,
    output_path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    """Export ``model`` to ONNX at ``output_path`` via ``torch.onnx.export``.

    NOTE(review): parameter names restored — the obfuscated signature repeated
    one name eight times, which is a SyntaxError.
    """
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
# NOTE(review): this function is heavily damaged by obfuscation — the
# signature repeats one parameter name four times (a SyntaxError), every
# local is re-bound to the single name `lowercase`, and later lines read
# names (`pipeline`, `text_input`, `output_path`, `unet_path`, ...) that are
# never bound.  Left byte-identical pending a restore from upstream
# (diffusers' convert_stable_diffusion_checkpoint_to_onnx.py).
@torch.no_grad()
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = False ):
    '''Convert a StableDiffusion checkpoint (model_path, output_path, opset,
    fp16 flag) to an ONNX pipeline on disk — presumably; confirm upstream.'''
    # NOTE(review): both branches of this conditional are identical
    # ("floataa" looks like a mangled float16/float32 pair) — confirm.
    lowercase = torch.floataa if fpaa else torch.floataa
    if fpaa and torch.cuda.is_available():
        lowercase = '''cuda'''
    elif fpaa and not torch.cuda.is_available():
        raise ValueError('''`float16` model export is only supported on GPUs with CUDA''' )
    else:
        lowercase = '''cpu'''
    lowercase = StableDiffusionPipeline.from_pretrained(lowerCAmelCase__ , torch_dtype=lowerCAmelCase__ ).to(lowerCAmelCase__ )
    lowercase = Path(lowerCAmelCase__ )
    # TEXT ENCODER
    lowercase = pipeline.text_encoder.config.max_position_embeddings
    lowercase = pipeline.text_encoder.config.hidden_size
    lowercase = pipeline.tokenizer(
        '''A sample prompt''' , padding='''max_length''' , max_length=pipeline.tokenizer.model_max_length , truncation=lowerCAmelCase__ , return_tensors='''pt''' , )
    onnx_export(
        pipeline.text_encoder , model_args=(text_input.input_ids.to(device=lowerCAmelCase__ , dtype=torch.intaa )) , output_path=output_path / '''text_encoder''' / '''model.onnx''' , ordered_input_names=['''input_ids'''] , output_names=['''last_hidden_state''', '''pooler_output'''] , dynamic_axes={
            '''input_ids''': {0: '''batch''', 1: '''sequence'''},
        } , opset=lowerCAmelCase__ , )
    del pipeline.text_encoder
    # UNET
    lowercase = pipeline.unet.config.in_channels
    lowercase = pipeline.unet.config.sample_size
    lowercase = output_path / '''unet''' / '''model.onnx'''
    onnx_export(
        pipeline.unet , model_args=(
            torch.randn(2 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ ),
            torch.randn(2 ).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ ),
            torch.randn(2 , lowerCAmelCase__ , lowerCAmelCase__ ).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ ),
            False,
        ) , output_path=lowerCAmelCase__ , ordered_input_names=['''sample''', '''timestep''', '''encoder_hidden_states''', '''return_dict'''] , output_names=['''out_sample'''] , dynamic_axes={
            '''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
            '''timestep''': {0: '''batch'''},
            '''encoder_hidden_states''': {0: '''batch''', 1: '''sequence'''},
        } , opset=lowerCAmelCase__ , use_external_data_format=lowerCAmelCase__ , )
    # NOTE(review): `unet_path` below is never bound under that name.
    lowercase = str(unet_path.absolute().as_posix() )
    lowercase = os.path.dirname(lowerCAmelCase__ )
    lowercase = onnx.load(lowerCAmelCase__ )
    # clean up existing tensor files
    shutil.rmtree(lowerCAmelCase__ )
    os.mkdir(lowerCAmelCase__ )
    # collate external tensor files into one
    onnx.save_model(
        lowerCAmelCase__ , lowerCAmelCase__ , save_as_external_data=lowerCAmelCase__ , all_tensors_to_one_file=lowerCAmelCase__ , location='''weights.pb''' , convert_attribute=lowerCAmelCase__ , )
    del pipeline.unet
    # VAE ENCODER
    lowercase = pipeline.vae
    lowercase = vae_encoder.config.in_channels
    lowercase = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    lowercase = lambda lowerCAmelCase__ , lowerCAmelCase__ : vae_encoder.encode(lowerCAmelCase__ , lowerCAmelCase__ )[0].sample()
    onnx_export(
        lowerCAmelCase__ , model_args=(
            torch.randn(1 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ ),
            False,
        ) , output_path=output_path / '''vae_encoder''' / '''model.onnx''' , ordered_input_names=['''sample''', '''return_dict'''] , output_names=['''latent_sample'''] , dynamic_axes={
            '''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
        } , opset=lowerCAmelCase__ , )
    # VAE DECODER
    lowercase = pipeline.vae
    lowercase = vae_decoder.config.latent_channels
    lowercase = vae_decoder.config.out_channels
    # forward only through the decoder part
    lowercase = vae_encoder.decode
    onnx_export(
        lowerCAmelCase__ , model_args=(
            torch.randn(1 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ ),
            False,
        ) , output_path=output_path / '''vae_decoder''' / '''model.onnx''' , ordered_input_names=['''latent_sample''', '''return_dict'''] , output_names=['''sample'''] , dynamic_axes={
            '''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
        } , opset=lowerCAmelCase__ , )
    del pipeline.vae
    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        lowercase = pipeline.safety_checker
        lowercase = safety_checker.config.vision_config.num_channels
        lowercase = safety_checker.config.vision_config.image_size
        lowercase = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker , model_args=(
                torch.randn(
                    1 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ ),
                torch.randn(1 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ ),
            ) , output_path=output_path / '''safety_checker''' / '''model.onnx''' , ordered_input_names=['''clip_input''', '''images'''] , output_names=['''out_images''', '''has_nsfw_concepts'''] , dynamic_axes={
                '''clip_input''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
                '''images''': {0: '''batch''', 1: '''height''', 2: '''width''', 3: '''channels'''},
            } , opset=lowerCAmelCase__ , )
        del pipeline.safety_checker
        lowercase = OnnxRuntimeModel.from_pretrained(output_path / '''safety_checker''' )
        lowercase = pipeline.feature_extractor
    else:
        lowercase = None
        lowercase = None
    lowercase = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_encoder''' ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_decoder''' ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''text_encoder''' ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / '''unet''' ) , scheduler=pipeline.scheduler , safety_checker=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , requires_safety_checker=safety_checker is not None , )
    onnx_pipeline.save_pretrained(lowerCAmelCase__ )
    print('''ONNX pipeline saved to''' , lowerCAmelCase__ )
    del pipeline
    del onnx_pipeline
    lowercase = OnnxStableDiffusionPipeline.from_pretrained(lowerCAmelCase__ , provider='''CPUExecutionProvider''' )
    print('''ONNX pipeline is loadable''' )
if __name__ == "__main__":
    # Restored `parser`/`args` bindings — the obfuscated original assigned the
    # parser and the parse result to throwaway names, so `parser.add_argument`
    # and `args.*` below could never resolve; `args.fpaa` also had no
    # matching argparse dest ("--fp16" stores into `fp16`).
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()
    # NOTE(review): `convert_models` is the upstream name of the decorated
    # function above (obfuscated to `UpperCamelCase`) — confirm.
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
| 101 |
"""simple docstring"""
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def _lowercase ( *__snake_case ) -> Optional[Any]:
with open(__snake_case ,"r" ) as fh:
fcntl.flock(__snake_case ,fcntl.LOCK_EX )
try:
print(*__snake_case )
finally:
fcntl.flock(__snake_case ,fcntl.LOCK_UN )
# Restored the variable bindings the obfuscated original discarded
# (`local_rank`, `device`, `hostname`, `gpu`, `rank`, `world_size`), and the
# print-flock helper is called under its name in this file (`_lowercase`).
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = F"""[{hostname}-{local_rank}]"""
try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()
    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)
    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
    _lowercase(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
    dist.barrier()
    if rank == 0:
        _lowercase(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
    _lowercase(F"""{gpu} is broken""")
    raise
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Standard lazy-import scaffolding for the Megatron-BERT subpackage.
# Bugs fixed: the import structure and the torch-only model list were bound
# to throwaway `SCREAMING_SNAKE_CASE` names, so the `_import_structure`
# consumed by `_LazyModule` was never defined (NameError), the modeling list
# was never registered, and the lazy proxy was never installed in
# `sys.modules`.
_import_structure = {
    """configuration_megatron_bert""": ["""MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegatronBertConfig"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_megatron_bert"] = [
        """MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """MegatronBertForCausalLM""",
        """MegatronBertForMaskedLM""",
        """MegatronBertForMultipleChoice""",
        """MegatronBertForNextSentencePrediction""",
        """MegatronBertForPreTraining""",
        """MegatronBertForQuestionAnswering""",
        """MegatronBertForSequenceClassification""",
        """MegatronBertForTokenClassification""",
        """MegatronBertModel""",
        """MegatronBertPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_megatron_bert import (
            MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegatronBertForCausalLM,
            MegatronBertForMaskedLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
            MegatronBertModel,
            MegatronBertPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so the heavy torch imports only
    # happen when a symbol is actually requested.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 102 |
"""simple docstring"""
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
__snake_case : Optional[int] = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
__snake_case : str = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
__snake_case : str = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n'
def _lowercase ( __snake_case ,__snake_case ) -> Union[str, Any]:
return float((preds == labels).mean() )
def _lowercase(preds, labels) -> dict:
    """Return both simple accuracy and F1 for `preds` vs `labels`.

    Bugs fixed: both parameters were declared with the same name
    (SyntaxError), and the body called `simple_accuracy`, which this module
    never defines under that name — the computation is inlined instead.
    `fa_score` is the module's (mangled) import of sklearn's f1 scorer.
    """
    acc = float((preds == labels).mean())
    fa = float(fa_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": fa,
    }
def _lowercase ( __snake_case ,__snake_case ) -> int:
__lowerCAmelCase : Union[str, Any] = np.array(__snake_case )
__lowerCAmelCase : Tuple = np.array(__snake_case )
__lowerCAmelCase : List[Any] = en_sentvecs.shape[0]
# mean centering
__lowerCAmelCase : Union[str, Any] = en_sentvecs - np.mean(__snake_case ,axis=0 )
__lowerCAmelCase : int = in_sentvecs - np.mean(__snake_case ,axis=0 )
__lowerCAmelCase : Optional[Any] = cdist(__snake_case ,__snake_case ,"cosine" )
__lowerCAmelCase : int = np.array(range(__snake_case ) )
__lowerCAmelCase : int = sim.argsort(axis=1 )[:, :10]
__lowerCAmelCase : Optional[Any] = np.any(preds == actual[:, None] ,axis=1 )
return float(matches.mean() )
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
    """IndicGLUE evaluation metric: depending on `self.config_name` it
    computes plain accuracy, accuracy + F1 (`wiki-ner`), or precision@10
    (`cvit-mkb-clsr` retrieval).

    NOTE(review): this class is machine-mangled and cannot run as written —
    both methods share the name `_SCREAMING_SNAKE_CASE` (the second shadows
    the first), the second method declares two parameters with the same name
    (a SyntaxError), and the bodies call module helpers (`precision_at_aa`,
    `acc_and_fa`, `simple_accuracy`) this file never defines under those
    names. Comments document the evident intent.
    """

    def _SCREAMING_SNAKE_CASE ( self: int) -> str:
        """Metric info: validate the config name, then declare the feature
        schema — `cvit-mkb-clsr` takes float32 vectors, every other config
        takes int64 labels."""
        if self.config_name not in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "cvit-mkb-clsr",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
            "wiki-ner",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                "[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
                "\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
                "\"wiki-ner\"]")
        # `format="numpy"` everywhere except the vector-based retrieval task.
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                    "references": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                }) , codebase_urls=[] , reference_urls=[] , format="numpy" if self.config_name != "cvit-mkb-clsr" else None , )

    def _SCREAMING_SNAKE_CASE ( self: List[str] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Optional[Any]) -> int:
        """Compute the metric for one batch of predictions/references and
        return it as a dict keyed by metric name."""
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_aa(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_fa(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                "[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
                "\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
                "\"wiki-ner\"]")
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Restored module-level names: the tokenizer class below references
# VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES and logs through `logger`, but the
# originals were all bound to a single clobbered `A__` name (NameError).
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
        '''allenai/longformer-large-4096''': (
            '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
        ),
        '''allenai/longformer-large-4096-finetuned-triviaqa''': (
            '''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
        ),
        '''allenai/longformer-base-4096-extra.pos.embd.only''': (
            '''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
        ),
        '''allenai/longformer-large-4096-extra.pos.embd.only''': (
            '''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
        ),
    },
    '''merges_file''': {
        '''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
        '''allenai/longformer-large-4096''': (
            '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
        ),
        '''allenai/longformer-large-4096-finetuned-triviaqa''': (
            '''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
        ),
        '''allenai/longformer-base-4096-extra.pos.embd.only''': (
            '''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
        ),
        '''allenai/longformer-large-4096-extra.pos.embd.only''': (
            '''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''allenai/longformer-base-4096''': 4096,
    '''allenai/longformer-large-4096''': 4096,
    '''allenai/longformer-large-4096-finetuned-triviaqa''': 4096,
    '''allenai/longformer-base-4096-extra.pos.embd.only''': 4096,
    '''allenai/longformer-large-4096-extra.pos.embd.only''': 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def UpperCamelCase( ):
    """Build a reversible byte (0-255) -> printable unicode character map for
    byte-level BPE. Printable latin-1 ranges map to themselves; the remaining
    bytes map to code points starting at 256 so no control/space characters
    appear inside BPE tokens.

    Bug fixed: every local was bound to one clobbered `lowerCAmelCase_` name
    while the body read `bs`, `cs` and `n` (NameError); restored the
    intended names.
    """
    bs = (
        list(range(ord('''!''' ) ,ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) ,ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) ,ord('''ÿ''' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(c ) for c in cs]
    return dict(zip(bs ,cs ) )
def UpperCamelCase( __UpperCamelCase : Union[str, Any] ):
lowerCAmelCase_ : str = set()
lowerCAmelCase_ : List[str] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCAmelCase_ : str = char
return pairs
class __snake_case ( UpperCamelCase_ ):
    """Byte-level BPE tokenizer in the Longformer/RoBERTa family:
    `vocab.json` maps token strings to ids and `merges.txt` ranks BPE merge
    rules.

    NOTE(review): this class is machine-mangled and cannot run as written:
    - all four class attributes share the name `_a`, so each assignment
      clobbers the previous one;
    - `__init__` and several methods declare multiple parameters with the
      same name `A_`, which is a SyntaxError in Python;
    - method bodies bind results to throwaway `lowerCAmelCase_` locals and
      then read the intended names (`word`, `pairs`, `new_word`, `index`,
      `kv`, ...), which are undefined.
    The comments below document the evident intent of each member.
    """

    # Intended (shadowed) attributes: vocab_files_names,
    # pretrained_vocab_files_map, max_model_input_sizes, model_input_names.
    _a = VOCAB_FILES_NAMES
    _a = PRETRAINED_VOCAB_FILES_MAP
    _a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _a = ['''input_ids''', '''attention_mask''']

    def __init__( self : Any , A_ : Dict , A_ : Tuple , A_ : List[str]="replace" , A_ : str="<s>" , A_ : int="</s>" , A_ : Tuple="</s>" , A_ : int="<s>" , A_ : Any="<unk>" , A_ : Optional[Any]="<pad>" , A_ : List[str]="<mask>" , A_ : Union[str, Any]=False , **A_ : Optional[Any] , ):
        # Wrap plain-string special tokens as AddedToken instances before
        # handing them to the base class.
        lowerCAmelCase_ : str = AddedToken(A_ , lstrip=A_ , rstrip=A_) if isinstance(A_ , A_) else bos_token
        lowerCAmelCase_ : Any = AddedToken(A_ , lstrip=A_ , rstrip=A_) if isinstance(A_ , A_) else eos_token
        lowerCAmelCase_ : List[Any] = AddedToken(A_ , lstrip=A_ , rstrip=A_) if isinstance(A_ , A_) else sep_token
        lowerCAmelCase_ : Union[str, Any] = AddedToken(A_ , lstrip=A_ , rstrip=A_) if isinstance(A_ , A_) else cls_token
        lowerCAmelCase_ : Any = AddedToken(A_ , lstrip=A_ , rstrip=A_) if isinstance(A_ , A_) else unk_token
        lowerCAmelCase_ : int = AddedToken(A_ , lstrip=A_ , rstrip=A_) if isinstance(A_ , A_) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        lowerCAmelCase_ : Any = AddedToken(A_ , lstrip=A_ , rstrip=A_) if isinstance(A_ , A_) else mask_token
        super().__init__(
            errors=A_ , bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , add_prefix_space=A_ , **A_ , )
        # Load the token<->id tables and the byte<->unicode tables.
        with open(A_ , encoding='''utf-8''') as vocab_handle:
            lowerCAmelCase_ : Any = json.load(A_)
        lowerCAmelCase_ : int = {v: k for k, v in self.encoder.items()}
        lowerCAmelCase_ : List[Any] = errors  # how to handle errors in decoding
        lowerCAmelCase_ : Dict = bytes_to_unicode()
        lowerCAmelCase_ : Optional[int] = {v: k for k, v in self.byte_encoder.items()}
        # Parse merges.txt (skipping the "#version" header) into a rank dict.
        with open(A_ , encoding='''utf-8''') as merges_handle:
            lowerCAmelCase_ : Any = merges_handle.read().split('''\n''')[1:-1]
        lowerCAmelCase_ : List[Any] = [tuple(merge.split()) for merge in bpe_merges]
        lowerCAmelCase_ : Union[str, Any] = dict(zip(A_ , range(len(A_))))
        lowerCAmelCase_ : Optional[int] = {}
        lowerCAmelCase_ : Optional[int] = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        lowerCAmelCase_ : Union[str, Any] = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''')

    @property
    def UpperCAmelCase__ ( self : Optional[Any]):
        # Intended `vocab_size`: number of entries in the base vocabulary.
        return len(self.encoder)

    def UpperCAmelCase__ ( self : List[str]):
        # Intended `get_vocab`: base vocab merged with added tokens.
        return dict(self.encoder , **self.added_tokens_encoder)

    def UpperCAmelCase__ ( self : Dict , A_ : List[str]):
        # Intended `bpe(token)`: repeatedly merge the lowest-ranked adjacent
        # pair until no ranked pair remains; the result is cached per token.
        if token in self.cache:
            return self.cache[token]
        lowerCAmelCase_ : List[Any] = tuple(A_)
        lowerCAmelCase_ : Optional[Any] = get_pairs(A_)
        if not pairs:
            return token
        while True:
            lowerCAmelCase_ : int = min(A_ , key=lambda A_: self.bpe_ranks.get(A_ , float('''inf''')))
            if bigram not in self.bpe_ranks:
                break
            lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = bigram
            lowerCAmelCase_ : Any = []
            lowerCAmelCase_ : Union[str, Any] = 0
            # Rebuild the word with every (first, second) occurrence merged.
            while i < len(A_):
                try:
                    lowerCAmelCase_ : List[str] = word.index(A_ , A_)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    lowerCAmelCase_ : Union[str, Any] = j
                if word[i] == first and i < len(A_) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            lowerCAmelCase_ : Union[str, Any] = tuple(A_)
            lowerCAmelCase_ : List[Any] = new_word
            if len(A_) == 1:
                break
            else:
                lowerCAmelCase_ : List[Any] = get_pairs(A_)
        lowerCAmelCase_ : Any = ''' '''.join(A_)
        lowerCAmelCase_ : Any = word
        return word

    def UpperCAmelCase__ ( self : str , A_ : Optional[Any]):
        # Intended `_tokenize(text)`: regex-split, byte-encode, then BPE.
        lowerCAmelCase_ : Optional[Any] = []
        for token in re.findall(self.pat , A_):
            lowerCAmelCase_ : List[Any] = ''''''.join(
                self.byte_encoder[b] for b in token.encode('''utf-8'''))  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(A_).split(''' '''))
        return bpe_tokens

    def UpperCAmelCase__ ( self : Any , A_ : str):
        # Intended `_convert_token_to_id` (falls back to the unk token id).
        return self.encoder.get(A_ , self.encoder.get(self.unk_token))

    def UpperCAmelCase__ ( self : str , A_ : int):
        # Intended `_convert_id_to_token`.
        return self.decoder.get(A_)

    def UpperCAmelCase__ ( self : Union[str, Any] , A_ : Tuple):
        # Intended `convert_tokens_to_string`: undo the byte->unicode map.
        lowerCAmelCase_ : Optional[int] = ''''''.join(A_)
        lowerCAmelCase_ : Optional[Any] = bytearray([self.byte_decoder[c] for c in text]).decode('''utf-8''' , errors=self.errors)
        return text

    def UpperCAmelCase__ ( self : List[Any] , A_ : str , A_ : Optional[str] = None):
        # Intended `save_vocabulary`: write vocab.json and merges.txt into
        # `save_directory` and return both paths.
        if not os.path.isdir(A_):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
            return
        lowerCAmelCase_ : Union[str, Any] = os.path.join(
            A_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        lowerCAmelCase_ : List[Any] = os.path.join(
            A_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''])
        with open(A_ , '''w''' , encoding='''utf-8''') as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=A_ , ensure_ascii=A_) + '''\n''')
        lowerCAmelCase_ : str = 0
        with open(A_ , '''w''' , encoding='''utf-8''') as writer:
            writer.write('''#version: 0.2\n''')
            # Merges must be written in rank order; warn on gaps in the ranks.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda A_: kv[1]):
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        ''' Please check that the tokenizer is not corrupted!''')
                    lowerCAmelCase_ : Any = token_index
                writer.write(''' '''.join(A_) + '''\n''')
                index += 1
        return vocab_file, merge_file

    def UpperCAmelCase__ ( self : List[str] , A_ : List[int] , A_ : Optional[List[int]] = None):
        # Intended `build_inputs_with_special_tokens`:
        # <s> A </s>  or  <s> A </s></s> B </s>.
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        lowerCAmelCase_ : List[Any] = [self.cls_token_id]
        lowerCAmelCase_ : Tuple = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def UpperCAmelCase__ ( self : int , A_ : List[int] , A_ : Optional[List[int]] = None , A_ : bool = False):
        # Intended `get_special_tokens_mask`: 1 marks special tokens.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_)
        if token_ids_a is None:
            return [1] + ([0] * len(A_)) + [1]
        return [1] + ([0] * len(A_)) + [1, 1] + ([0] * len(A_)) + [1]

    def UpperCAmelCase__ ( self : str , A_ : List[int] , A_ : Optional[List[int]] = None):
        # Intended `create_token_type_ids_from_sequences`: all zeros —
        # RoBERTa-style models do not use token type ids.
        lowerCAmelCase_ : Optional[Any] = [self.sep_token_id]
        lowerCAmelCase_ : str = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]

    def UpperCAmelCase__ ( self : Dict , A_ : Any , A_ : str=False , **A_ : Optional[int]):
        # Intended `prepare_for_tokenization`: optionally prepend a space so
        # the first word is tokenized like a mid-sentence word.
        lowerCAmelCase_ : Union[str, Any] = kwargs.pop('''add_prefix_space''' , self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(A_) > 0 and not text[0].isspace()):
            lowerCAmelCase_ : str = ''' ''' + text
        return (text, kwargs)
| 103 |
"""simple docstring"""
def _lowercase ( __snake_case ,__snake_case ) -> float:
if digit_amount > 0:
return round(number - int(__snake_case ) ,__snake_case )
return number - int(__snake_case )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3)) | 269 | 0 |
'''simple docstring'''
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# Restored fixture names: the parametrized tests below reference
# README_CORRECT, CORRECT_DICT, EXPECTED_ERROR_..., etc., but every constant
# in the original was bound to one repeatedly clobbered `lowerCAmelCase__`
# name, so none of those identifiers existed (NameError at collection).
# Each restored name follows the content of its string/dict.

# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
    '''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
  - name: "Dataset Card for X" # First-level markdown heading
    allow_empty: false
    allow_empty_text: true
    subsections:
      - name: "Table of Contents"
        allow_empty: false
        allow_empty_text: false
        subsections: null
      - name: "Dataset Description"
        allow_empty: false
        allow_empty_text: false
        subsections:
          - name: "Dataset Summary"
            allow_empty: false
            allow_empty_text: false
            subsections: null
          - name: "Supported Tasks and Leaderboards"
            allow_empty: true
            allow_empty_text: true
            subsections: null
          - name: Languages
            allow_empty: false
            allow_empty_text: true
            subsections: null
'''
)

CORRECT_DICT = {
    '''name''': '''root''',
    '''text''': '''''',
    '''is_empty_text''': True,
    '''subsections''': [
        {
            '''name''': '''Dataset Card for My Dataset''',
            '''text''': '''''',
            '''is_empty_text''': True,
            '''subsections''': [
                {'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
                {
                    '''name''': '''Dataset Description''',
                    '''text''': '''Some text here.''',
                    '''is_empty_text''': False,
                    '''subsections''': [
                        {
                            '''name''': '''Dataset Summary''',
                            '''text''': '''Some text here.''',
                            '''is_empty_text''': False,
                            '''subsections''': [],
                        },
                        {
                            '''name''': '''Supported Tasks and Leaderboards''',
                            '''text''': '''''',
                            '''is_empty_text''': True,
                            '''subsections''': [],
                        },
                        {'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
                    ],
                },
            ],
        }
    ],
}

README_CORRECT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''

README_CORRECT_FOUR_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''

CORRECT_DICT_FOUR_LEVEL = {
    '''name''': '''root''',
    '''text''': '''''',
    '''is_empty_text''': True,
    '''subsections''': [
        {
            '''name''': '''Dataset Card for My Dataset''',
            '''text''': '''''',
            '''is_empty_text''': True,
            '''subsections''': [
                {'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
                {
                    '''name''': '''Dataset Description''',
                    '''text''': '''Some text here.''',
                    '''is_empty_text''': False,
                    '''subsections''': [
                        {
                            '''name''': '''Dataset Summary''',
                            '''text''': '''Some text here.''',
                            '''is_empty_text''': False,
                            '''subsections''': [
                                {
                                    '''name''': '''Extra Ignored Subsection''',
                                    '''text''': '''''',
                                    '''is_empty_text''': True,
                                    '''subsections''': [],
                                }
                            ],
                        },
                        {
                            '''name''': '''Supported Tasks and Leaderboards''',
                            '''text''': '''''',
                            '''is_empty_text''': True,
                            '''subsections''': [],
                        },
                        {'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
                    ],
                },
            ],
        }
    ],
}

README_EMPTY_YAML = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''

EXPECTED_ERROR_README_EMPTY_YAML = (
    '''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)

README_NO_YAML = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''

EXPECTED_ERROR_README_NO_YAML = (
    '''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)

README_INCORRECT_YAML = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''

EXPECTED_ERROR_README_INCORRECT_YAML = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''

README_MISSING_TEXT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''

EXPECTED_ERROR_README_MISSING_TEXT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''

README_NONE_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''

EXPECTED_ERROR_README_NONE_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''

README_MISSING_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''

EXPECTED_ERROR_README_MISSING_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''

README_MISSING_CONTENT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''

EXPECTED_ERROR_README_MISSING_CONTENT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''

README_MISSING_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''

EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''

README_MULTIPLE_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''

EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''

README_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''

EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''

README_EMPTY = ''''''

EXPECTED_ERROR_README_EMPTY = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''

README_MULTIPLE_SAME_HEADING_1 = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''

EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
@pytest.mark.parametrize(
    '''readme_md, expected_dict''' , [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ] , )
def _A ( readme_md , expected_dict ):
    """A structurally correct README string parses into the expected dict.

    Bug fixed: both parameters were declared as `A__` (a SyntaxError), and
    pytest requires the names to match the `parametrize` spec.
    """
    assert ReadMe.from_string(readme_md , example_yaml_structure ).to_dict() == expected_dict
@pytest.mark.parametrize(
    '''readme_md, expected_error''' , [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ] , )
def _A ( readme_md , expected_error ):
    """A malformed README string raises ValueError with the documented
    message when validated. Bug fixed: duplicate `A__` parameters
    (SyntaxError) and an undefined `expected_error` in the body.
    """
    with pytest.raises(ValueError , match=re.escape(expected_error.format(path='''root''' ) ) ):
        readme = ReadMe.from_string(readme_md , example_yaml_structure )
        readme.validate()
@pytest.mark.parametrize(
    '''readme_md, expected_error''' , [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ] , )
def _A ( readme_md , expected_error ):
    """A README with duplicated headings fails already while parsing.

    Bug fixed: duplicate `A__` parameters (SyntaxError) and an undefined
    `expected_error` reference in the body.
    """
    with pytest.raises(ValueError , match=re.escape(expected_error.format(path='''root''' ) ) ):
        ReadMe.from_string(readme_md , example_yaml_structure )
@pytest.mark.parametrize(
    '''readme_md,''' , [
        (README_MULTIPLE_SAME_HEADING_1),
    ] , )
def _A ( readme_md ):
    """With suppress_parsing_errors=True, a duplicated-heading README parses
    without raising. Bug fixed: the original declared two `A__` parameters
    (SyntaxError) for a one-parameter parametrize spec.
    """
    ReadMe.from_string(readme_md , example_yaml_structure , suppress_parsing_errors=True )
@pytest.mark.parametrize(
    '''readme_md, expected_dict''' , [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ] , )
def _A ( readme_md , expected_dict ):
    """A correct README read from disk parses into the expected structure;
    the root node is named after the file path and carries no text.

    Bug fixed: duplicate `A__` parameters (SyntaxError) and an undefined
    `path` referenced by the assertions (the local was mangled).
    """
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / '''README.md'''
        with open(path , '''w+''' ) as readme_file:
            readme_file.write(readme_md )
        out = ReadMe.from_readme(path , example_yaml_structure ).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
    '''readme_md, expected_error''' , [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ] , )
def _A ( readme_md , expected_error ):
    """A malformed README read from disk raises ValueError on validate(),
    with the path interpolated into the message.

    Bug fixed: duplicate `A__` parameters (SyntaxError); locals mangled.
    """
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / '''README.md'''
        with open(path , '''w+''' ) as readme_file:
            readme_file.write(readme_md )
        expected_error = expected_error.format(path=path )
        with pytest.raises(ValueError , match=re.escape(expected_error ) ):
            readme = ReadMe.from_readme(path , example_yaml_structure )
            readme.validate()
@pytest.mark.parametrize(
    '''readme_md, expected_error''' , [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ] , )
def _A ( readme_md , expected_error ):
    """A duplicated-heading README read from disk fails already while
    parsing. Bug fixed: duplicate `A__` parameters (SyntaxError); locals
    mangled.
    """
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / '''README.md'''
        with open(path , '''w+''' ) as readme_file:
            readme_file.write(readme_md )
        expected_error = expected_error.format(path=path )
        with pytest.raises(ValueError , match=re.escape(expected_error ) ):
            ReadMe.from_readme(path , example_yaml_structure )
@pytest.mark.parametrize(
    '''readme_md,''' , [
        (README_MULTIPLE_SAME_HEADING_1),
    ] , )
def _A ( readme_md ):
    """With suppress_parsing_errors=True, a duplicated-heading README read
    from disk parses without raising. Bug fixed: the original declared two
    `A__` parameters (SyntaxError) for a one-parameter parametrize spec.
    """
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / '''README.md'''
        with open(path , '''w+''' ) as readme_file:
            readme_file.write(readme_md )
        ReadMe.from_readme(path , example_yaml_structure , suppress_parsing_errors=True )
| 104 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=__SCREAMING_SNAKE_CASE ):
    """Placeholder object used when `torch` and `torchsde` are not
    installed: every entry point defers to `requires_backends`, which raises
    an informative error naming the missing backends.
    """

    SCREAMING_SNAKE_CASE = ['torch', 'torchsde']

    def __init__(self, *args, **kwargs):
        """Refuse instantiation unless both backends are available."""
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def _SCREAMING_SNAKE_CASE(cls, *args, **kwargs):
        """Class-level entry point; raises unless both backends are available."""
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def _SCREAMING_SNAKE_CASE(cls, *args, **kwargs):
        """Class-level entry point; raises unless both backends are available."""
        requires_backends(cls, ["torch", "torchsde"])
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): the two module-level bindings below both use the name `a`,
# so the second assignment clobbers the logger and neither value is
# reachable under a meaningful name afterwards (upstream convention would be
# `logger` and an `..._PRETRAINED_CONFIG_ARCHIVE_MAP` constant).
a : Tuple = logging.get_logger(__name__)

# Map from public checkpoint names to their hosted config.json URLs.
a : Tuple = {
    '''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''',
    '''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''',
    '''xlm-roberta-large-finetuned-conll02-dutch''': (
        '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'''
    ),
    '''xlm-roberta-large-finetuned-conll02-spanish''': (
        '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'''
    ),
    '''xlm-roberta-large-finetuned-conll03-english''': (
        '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'''
    ),
    '''xlm-roberta-large-finetuned-conll03-german''': (
        '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'''
    ),
}
class __UpperCamelCase ( a__ ):
    """Configuration class for XLM-RoBERTa models.

    Bugs fixed: the original `__init__` declared every parameter with the
    same name (`lowerCAmelCase__`, a SyntaxError) and bound every value to a
    throwaway local instead of an attribute on `self`, so the config stored
    nothing. Parameter names/defaults follow the canonical XLM-RoBERTa
    configuration; positional order and default values are unchanged.
    """

    lowerCamelCase : Union[str, Any] ="""xlm-roberta"""

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ) -> Optional[int]:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class __UpperCamelCase ( OnnxConfig ):
    """ONNX export configuration for XLM-RoBERTa-style encoders."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Return the dynamic-axis specification for the exported inputs."""
        # Multiple-choice inputs carry an extra `choice` dimension.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
| 105 |
"""simple docstring"""
def _lowercase ( ) -> int:
return 1
def _lowercase ( __snake_case ) -> int:
return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def _lowercase ( __snake_case ) -> int:
return 0 if x < 0 else five_pence(x - 5 ) + two_pence(__snake_case )
def _lowercase ( __snake_case ) -> int:
return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(__snake_case )
def _lowercase ( __snake_case ) -> int:
return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(__snake_case )
def _lowercase ( __snake_case ) -> int:
return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(__snake_case )
def _lowercase ( __snake_case ) -> int:
return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(__snake_case )
def _lowercase ( __snake_case ) -> int:
return 0 if x < 0 else two_pound(x - 200 ) + one_pound(__snake_case )
def _lowercase ( __snake_case = 200 ) -> int:
return two_pound(__snake_case )
if __name__ == "__main__":
print(solution(int(input().strip()))) | 269 | 0 |
"""simple docstring"""
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
__UpperCamelCase : Optional[int] = {
'''n_samples''': 6_4,
'''horizon''': 3_2,
'''num_inference_steps''': 2_0,
'''n_guide_steps''': 2, # can set to 0 for faster sampling, does not use value network
'''scale_grad_by_std''': True,
'''scale''': 0.1,
'''eta''': 0.0,
'''t_grad_cutoff''': 2,
'''device''': '''cpu''',
}
if __name__ == "__main__":
__UpperCamelCase : Optional[int] = '''hopper-medium-v2'''
__UpperCamelCase : Optional[int] = gym.make(env_name)
__UpperCamelCase : List[Any] = ValueGuidedRLPipeline.from_pretrained(
'''bglick13/hopper-medium-v2-value-function-hor32''',
env=env,
)
env.seed(0)
__UpperCamelCase : List[Any] = env.reset()
__UpperCamelCase : Optional[Any] = 0
__UpperCamelCase : Any = 0
__UpperCamelCase : Any = 1_0_0_0
__UpperCamelCase : Dict = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
__UpperCamelCase : Optional[int] = pipeline(obs, planning_horizon=3_2)
# execute action in environment
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Optional[Any] = env.step(denorm_actions)
__UpperCamelCase : Optional[Any] = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'''
F''' {total_score}'''
)
# save observations for rendering
rollout.append(next_observation.copy())
__UpperCamelCase : str = next_observation
except KeyboardInterrupt:
pass
print(F'''Total reward: {total_reward}''')
| 106 |
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class A__ ( TestCase ):
    """Static code-quality checks over every dataset script under ./datasets."""

    def _no_encoding_on_file_open(self, file_path: str):
        r"""Return a regex match if `file_path` contains an `open(...)` call without
        an explicit binary mode or encoding keyword, else None."""
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, file_path: str):
        r"""Return a regex match for an uncommented, unquoted `print(` call in
        `file_path`, else None."""
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            all_matches = regexp.finditer(input_text)
            matches = [match for match in all_matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
# Generic vertex type stored in the adjacency list.
T = TypeVar("T")


class snake_case__ (Generic[T] ):
    """Adjacency-list graph over hashable vertices of type T.

    Supports both directed and undirected graphs; vertices are created lazily
    the first time an edge touches them.
    """

    def __init__(self, directed: bool = True) -> None:
        """Create an empty graph.

        :param directed: if True (default) edges are one-way; otherwise the
            reverse edge is recorded as well.
        """
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        """Connect `source_vertex` to `destination_vertex`, creating any missing
        vertices, and return self so calls can be chained."""
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as it's first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create two new vertices pointing at each other.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as it's first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []

        return self

    def __repr__(self) -> str:
        """Pretty-printed adjacency dictionary."""
        return pformat(self.adj_list)
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
# Module logger; the original bound the logger and the map to the same name.
logger = logging.get_logger(__name__)

# Canonical Whisper checkpoint and the location of its config file.
WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
# Token ids suppressed during generation because they never correspond to
# speech (monolingual tokenizer). Kept sorted ascending.
NON_SPEECH_TOKENS = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
    705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
    1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
    4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
    11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
    17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
    34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
# Same idea for the multilingual tokenizer's id space.
NON_SPEECH_TOKENS_MULTI = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
    893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
    3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
    7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
    14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
    22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
    42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class A__ ( PretrainedConfig ):
    """Configuration for a Whisper speech-to-text model.

    Defaults reproduce the smallest (`tiny`-like) architecture; audio-
    classification and SpecAugment fields are only used by those heads.
    """

    model_type = 'whisper'
    keys_to_ignore_at_inference = ['past_key_values']
    # Lets generic code ask for `hidden_size` / `num_attention_heads`.
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],  # noqa: B006 -- read-only default, kept for checkpoint compatibility
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class A__ ( OnnxSeqaSeqConfigWithPast ):
    """ONNX export configuration for Whisper (encoder-decoder, optional past KV)."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis specification for the exported graph's inputs."""
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ])
        if self.use_past:
            # With cached keys/values the decoder consumes one token at a time.
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        """Build dummy audio features and decoder token ids for export tracing."""
        dummy_inputs = OrderedDict()
        # Audio side: delegate to the base OnnxConfig with the feature extractor.
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )

        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        # With past key values the decoder sequence is half the encoder's.
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework)

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance when validating the ONNX export against the original model."""
        return 1e-3
"""simple docstring"""
from typing import Any
import numpy as np
def a__ ( SCREAMING_SNAKE_CASE : np.ndarray ):
'''simple docstring'''
return np.array_equal(SCREAMING_SNAKE_CASE , matrix.conjugate().T )
def a__ ( SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : np.ndarray ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = v.conjugate().T
lowerCAmelCase : int = v_star.dot(SCREAMING_SNAKE_CASE )
assert isinstance(SCREAMING_SNAKE_CASE , np.ndarray )
return (v_star_dot.dot(SCREAMING_SNAKE_CASE )) / (v_star.dot(SCREAMING_SNAKE_CASE ))
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Dict = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
lowerCAmelCase : Any = np.array([[1], [2], [3]] )
assert is_hermitian(SCREAMING_SNAKE_CASE ), f"""{a} is not hermitian."""
print(rayleigh_quotient(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowerCAmelCase : Dict = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(SCREAMING_SNAKE_CASE ), f"""{a} is not hermitian."""
assert rayleigh_quotient(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 108 |
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
# Benchmark sizes; the original bound all four constants to one name.
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
# Results are written next to this script as <name>.json under ./results.
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def read(dataset, length):
    """Time sequential single-example access over the first `length` rows."""
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset, length, batch_size):
    """Time sliced batch access across the whole dataset in steps of `batch_size`."""
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset, length, type):
    """Time single-example access while the dataset is formatted as `type`."""
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset, length, batch_size, type):
    """Time batch access while the dataset is formatted as `type`."""
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    """Generate a synthetic dataset, time the access patterns above, and dump JSON results."""
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES, seq_shapes={"list": (100,)})
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            # NOTE(review): results are keyed by function name + kwargs values, as in
            # the upstream benchmark; the obfuscated original discarded the timing.
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
"""simple docstring"""
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
# NOTE(review): every constant below is bound to the same name `A`, so each
# assignment shadows the previous one; these were presumably distinct names
# (model id, revision ids, shas) before obfuscation — TODO restore.
A: str = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

A: str = "main"
# Default branch name

A: str = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)

A: str = "aaaaaaa"
# This commit does not exist, so we should 404.

A: str = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes

A: str = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
@contextlib.contextmanager
def context_en():
    """Context manager that greets in English on entry and exit."""
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    """Context manager that greets in French on entry and exit."""
    print("Bonjour!")
    yield
    print("Au revoir!")
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    # Sanity check that the `transformers` package imported above is a real,
    # discoverable module (guards against a broken install).
    def SCREAMING_SNAKE_CASE ( self ) -> None:
        '''Assert `transformers` has an import spec and can be located again.'''
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("""transformers""" ) is not None
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Tests for `ContextManagers` and `find_labels` from `transformers.utils`."""

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        """An empty ContextManagers stack is a transparent no-op."""
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        """A single manager's enter/exit output wraps the body's output."""
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        """Managers are entered in list order and exited in reverse order."""
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels should also work on user subclasses of library models.
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
| 109 |
"""simple docstring"""
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self: Tuple , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: List[str]=13 , _SCREAMING_SNAKE_CASE: Tuple=7 , _SCREAMING_SNAKE_CASE: int=True , _SCREAMING_SNAKE_CASE: Optional[Any]=True , _SCREAMING_SNAKE_CASE: str=False , _SCREAMING_SNAKE_CASE: Optional[Any]=True , _SCREAMING_SNAKE_CASE: int=99 , _SCREAMING_SNAKE_CASE: int=32 , _SCREAMING_SNAKE_CASE: List[str]=5 , _SCREAMING_SNAKE_CASE: Union[str, Any]=4 , _SCREAMING_SNAKE_CASE: int=64 , _SCREAMING_SNAKE_CASE: List[str]="gelu" , _SCREAMING_SNAKE_CASE: str=0.1 , _SCREAMING_SNAKE_CASE: Any=0.1 , _SCREAMING_SNAKE_CASE: Optional[int]=512 , _SCREAMING_SNAKE_CASE: Tuple=16 , _SCREAMING_SNAKE_CASE: Any=2 , _SCREAMING_SNAKE_CASE: List[str]=0.02 , _SCREAMING_SNAKE_CASE: Tuple=3 , _SCREAMING_SNAKE_CASE: Optional[Any]=4 , _SCREAMING_SNAKE_CASE: int=None , _SCREAMING_SNAKE_CASE: int=2 , _SCREAMING_SNAKE_CASE: str=2 , _SCREAMING_SNAKE_CASE: Union[str, Any]=2 , _SCREAMING_SNAKE_CASE: List[Any]=2 , _SCREAMING_SNAKE_CASE: int=4 , _SCREAMING_SNAKE_CASE: List[str]=1 , ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : List[str] = parent
__lowerCAmelCase : Optional[Any] = batch_size
__lowerCAmelCase : Union[str, Any] = seq_length
__lowerCAmelCase : Optional[Any] = is_training
__lowerCAmelCase : Optional[int] = use_input_mask
__lowerCAmelCase : Dict = use_token_type_ids
__lowerCAmelCase : Dict = use_labels
__lowerCAmelCase : Dict = vocab_size
__lowerCAmelCase : Tuple = hidden_size
__lowerCAmelCase : List[Any] = num_hidden_layers
__lowerCAmelCase : Union[str, Any] = num_attention_heads
__lowerCAmelCase : Tuple = intermediate_size
__lowerCAmelCase : List[Any] = hidden_act
__lowerCAmelCase : Optional[Any] = hidden_dropout_prob
__lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
__lowerCAmelCase : Optional[int] = max_position_embeddings
__lowerCAmelCase : Union[str, Any] = type_vocab_size
__lowerCAmelCase : Optional[int] = type_sequence_label_size
__lowerCAmelCase : Dict = initializer_range
__lowerCAmelCase : Tuple = num_labels
__lowerCAmelCase : Optional[Any] = num_choices
__lowerCAmelCase : Union[str, Any] = scope
__lowerCAmelCase : Optional[Any] = q_groups
__lowerCAmelCase : Optional[int] = k_groups
__lowerCAmelCase : Any = v_groups
__lowerCAmelCase : int = post_attention_groups
__lowerCAmelCase : List[str] = intermediate_groups
__lowerCAmelCase : Optional[Any] = output_groups
def _SCREAMING_SNAKE_CASE ( self: Dict) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__lowerCAmelCase : Union[str, Any] = None
if self.use_input_mask:
__lowerCAmelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length])
__lowerCAmelCase : Optional[int] = None
__lowerCAmelCase : List[Any] = None
__lowerCAmelCase : str = None
if self.use_labels:
__lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__lowerCAmelCase : str = ids_tensor([self.batch_size] , self.num_choices)
__lowerCAmelCase : Any = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> int:
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Tuple) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : List[Any] = SqueezeBertModel(config=_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
__lowerCAmelCase : List[Any] = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Tuple) -> Dict:
"""simple docstring"""
__lowerCAmelCase : int = SqueezeBertForMaskedLM(config=_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
__lowerCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _SCREAMING_SNAKE_CASE ( self: Optional[int] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Union[str, Any]) -> int:
"""simple docstring"""
__lowerCAmelCase : str = SqueezeBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
__lowerCAmelCase : Union[str, Any] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: Tuple) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = self.num_labels
__lowerCAmelCase : Union[str, Any] = SqueezeBertForSequenceClassification(_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
__lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[int]) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : Dict = self.num_labels
__lowerCAmelCase : Optional[int] = SqueezeBertForTokenClassification(config=_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
__lowerCAmelCase : List[str] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: int) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : List[str] = self.num_choices
__lowerCAmelCase : str = SqueezeBertForMultipleChoice(config=_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
__lowerCAmelCase : int = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__lowerCAmelCase : Union[str, Any] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__lowerCAmelCase : str = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _SCREAMING_SNAKE_CASE ( self: str) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs()
((__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase)) : Union[str, Any] = config_and_inputs
__lowerCAmelCase : int = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
# NOTE(review): the two mangled base names below are presumably the common
# model-tester and pipeline-tester mixins — confirm against this file's imports.
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """SqueezeBert model test suite.

    The original had all five class attributes assigned to ONE mangled name
    (each assignment overwriting the previous) and every method sharing one
    mangled name, so unittest discovered no tests at all. Attribute and test
    names are restored to the conventional ones the mixins/unittest expect.
    """

    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): the three flags below were indistinguishable after mangling;
    # mapping (pruning=False, resize_embeddings=True, head_masking=False) follows
    # the original assignment order — confirm.
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # Smoke-test loading the first published checkpoint.
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class A__ ( unittest.TestCase ):
    """Integration test against the published squeezebert-mnli checkpoint."""

    @slow
    def test_sentence_classifiers(self):
        """Pin the MNLI head's logits on a fixed token sequence.

        Restored name: the mangled method name did not start with ``test_``,
        so unittest never discovered (and never ran) this check.
        """
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
        input_ids = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
'''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def snake_case_ ( vector: ndarray ) -> float:
    """Return the squared Euclidean norm of *vector*, i.e. ``v . v``.

    Fixed: the mangled body referenced an undefined name instead of the
    function's own parameter.
    """
    return np.dot(vector, vector)
class lowerCAmelCase__ :
    """Hard-margin / soft-margin Support Vector Classifier.

    Trains by maximizing Wolfe's dual with ``scipy.optimize.minimize`` and
    classifies with the sign of the kernelized decision function.

    Restored from a mangled original in which all four methods shared one
    name (shadowing each other), ``__init__`` referenced nonexistent kernel
    methods, and ``fit`` never stored its results on the instance even though
    ``predict`` reads ``self.optimum`` / ``self.classes`` / ``self.observations``
    / ``self.offset``.
    """

    def __init__(self, *, regularization: float = np.inf, kernel: str = "linear", gamma: float = 0.0) -> None:
        """Configure the classifier.

        :param regularization: box constraint C for the dual variables
            (``np.inf`` gives a hard margin).
        :param kernel: "linear" or "rbf".
        :param gamma: RBF width; must be > 0 when kernel == "rbf".
        :raises ValueError: on a bad gamma or an unknown kernel name.
        """
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError('rbf kernel requires gamma')
            if not isinstance(self.gamma, (float, int)):
                raise ValueError('gamma must be float or int')
            if not self.gamma > 0:
                raise ValueError('gamma must be > 0')
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f'''Unknown kernel: {kernel}'''
            raise ValueError(msg)

    def __linear(self, vectora: ndarray, vectorb: ndarray) -> float:
        """Linear kernel: the plain dot product."""
        return np.dot(vectora, vectorb)

    def __rbf(self, vectora: ndarray, vectorb: ndarray) -> float:
        """RBF kernel: exp(-gamma * ||v1 - v2||^2), via the module-level squared norm."""
        return np.exp(-(self.gamma * snake_case_(vectora - vectorb)))

    def fit(self, observations: list, classes: ndarray) -> None:
        """Fit the classifier on `observations` with labels in {-1, +1}."""
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #   constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #   constraint: self.C >= ln >= 0
        #           and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            # Negated dual objective: 1/2 * sum_nm(ln lm yn ym K(xn, xm)) - sum_n(ln)
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)  # sum_n(ln*yn) = 0
        l_bounds = Bounds(0, self.regularization)  # 0 <= ln <= C
        l_star = minimize(to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(observations[i], observations[j])
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        """Return the predicted class (+1 or -1) for a single observation."""
        s = sum(
            self.optimum[n] * self.classes[n] * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 93 |
"""simple docstring"""
import os
from math import logaa
def _lowercase ( __snake_case: str = "base_exp.txt" ) -> int:
    """Project Euler 99: return the 1-based line number of the pair ``base,exp``
    with the greatest value of base**exp, compared via ``exp * log10(base)``.

    Fixes: the mangled body mapped the *filename* over the split tokens instead
    of ``int`` (a TypeError at runtime), and the file handle was never closed.
    """
    # Local import: the module-level `from math import logaa` is a mangled
    # (nonexistent) name, so we do not rely on it here.
    from math import log10

    largest: float = 0
    result = 0
    path = os.path.join(os.path.dirname(__snake_case), __snake_case)
    with open(path) as file_handle:
        for i, line in enumerate(file_handle):
            base, exponent = map(int, line.split(","))
            magnitude = exponent * log10(base)
            if magnitude > largest:
                largest = magnitude
                result = i + 1
    return result
if __name__ == "__main__":
    # Fixed: `solution` is undefined in this module; the solver is `_lowercase`.
    print(_lowercase())
"""simple docstring"""
import datasets
# Restored distinct names: the original assigned all three strings to ONE
# mangled name (each overwriting the last), while the metric decorator and
# MetricInfo below reference _CITATION / _DESCRIPTION / _KWARGS_DESCRIPTION.
_CITATION = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n'
_DESCRIPTION = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'
_KWARGS_DESCRIPTION = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n    predictions: Predicted labels.\n    references: Ground truth labels.\nReturns:\n    \'accuracy\': accuracy\nExamples:\n\n    >>> predictions = [0, 1]\n    >>> references = [0, 1]\n    >>> xnli_metric = datasets.load_metric("xnli")\n    >>> results = xnli_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0}\n'
def lowerCAmelCase__ ( preds , labels ):
    """Return simple accuracy: the mean of element-wise equality.

    Fixed: the original declared two parameters with the same mangled name
    (a SyntaxError) while the body already referenced ``preds``/``labels``.
    """
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
    """XNLI metric: plain accuracy over predicted vs. reference labels.

    Restored method names: both methods were mangled to one name (the second
    silently shadowing the first) with duplicate parameter names (a
    SyntaxError); ``datasets.Metric`` dispatches to ``_info`` and ``_compute``.
    """

    def _info(self):
        # Metric metadata and the expected feature schema ("sts-b" uses floats).
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        # Accuracy via the module-level helper: (preds == labels).mean().
        return {"accuracy": lowerCAmelCase__(predictions, references)}
| 294 |
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x):
    """Return ``x`` unchanged if it is iterable, otherwise duplicate it into a pair.

    Restored name: the attention-shape checks below call ``to_atuple``; the
    mangled original was named differently and returned an undefined name.
    """
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class A__ :
    """Shared checks for Flax vision-text dual-encoder models.

    Restored names: every method of this mixin had been mangled to one shared
    name with duplicate parameter names (a SyntaxError). Check names come from
    the visible ``test_*`` call sites; hook names from the subclasses below.
    """

    def get_vision_text_model(self, vision_config, text_config):
        """Hook: return (vision_model, text_model); implemented by subclasses."""
        pass

    def prepare_config_and_inputs(self):
        """Hook: return the config/inputs dict; implemented by subclasses."""
        pass

    def get_pretrained_model_and_inputs(self):
        """Hook: return (model, inputs); implemented by subclasses."""
        pass

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        """Assert the max absolute difference between a and b is below tol."""
        diff = np.abs((a - b)).max()
        self.assertLessEqual(diff, tol, f"""Difference between torch and flax is {diff} (>= {tol}).""")

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        # Build the dual encoder from raw sub-configs and check embedding shapes.
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = FlaxVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        # Build from already-instantiated sub-models and check embedding shapes.
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        # Outputs must be (almost) unchanged across a save/load round trip.
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)

        after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_2 = after_output[0]
        max_diff = np.amax(np.abs(out_2 - out_1))
        self.assertLessEqual(max_diff, 1e-3)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict):
        """Assert PT and Flax models agree, directly and across checkpoint conversion."""
        pt_model.to(torch_device)
        pt_model.eval()

        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}

        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()

        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)

        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)

        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()

        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)

    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)
        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state
        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)
        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")
        inputs_dict = config_inputs_dict
        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_flax
# NOTE(review): the mangled base name below should be the dual-encoder mixin
# defined above — confirm and fix the base reference.
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Concrete mixin instantiation: FlaxViT vision tower + FlaxBert text tower.

    Restored method names: all three methods had been mangled to one shared
    name; the mixin looks them up as ``get_pretrained_model_and_inputs``,
    ``get_vision_text_model`` and ``prepare_config_and_inputs``.
    """

    def get_pretrained_model_and_inputs(self):
        # NOTE(review): the two from_pt flags were mangled; True matches loading
        # these PyTorch tiny checkpoints into Flax — confirm.
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
# NOTE(review): the mangled base name below should be the dual-encoder mixin
# defined above — confirm and fix the base reference.
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Concrete mixin instantiation: FlaxCLIP vision tower + FlaxBert text tower.

    Restored method names: all three methods had been mangled to one shared
    name; the mixin looks them up as ``get_pretrained_model_and_inputs``,
    ``get_vision_text_model`` and ``prepare_config_and_inputs``.
    """

    def get_pretrained_model_and_inputs(self):
        # NOTE(review): the two from_pt flags were mangled; True matches loading
        # these PyTorch tiny checkpoints into Flax — confirm.
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class A__ ( unittest.TestCase ):
    """Integration test against the published clip-italian checkpoint."""

    @slow
    def test_inference(self):
        """Pin the model's image/text logits on a fixture image.

        Restored name: the mangled method name did not start with ``test_``,
        so unittest never discovered (and never ran) this check.
        """
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        # NOTE(review): the padding argument was mangled; True is the value that
        # batches the two prompts — confirm.
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.228_4727, 0.310_4122]])
        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
lowercase_ = logging.getLogger(__name__)
class __UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
    """CoNLL-style NER task: reads whitespace-separated token/label files where
    sentences are separated by blank lines or -DOCSTART- markers.

    NOTE(review): obfuscation collapsed distinct locals/attributes into one
    identifier (e.g. ``self.label_idx`` became a bare assignment), so several
    reads below reference names that are never bound here — verify against
    the upstream ``utils_ner`` implementation.
    """
    def __init__( self : Union[str, Any] , _A : Union[str, Any]=-1 ):
        """Remember which whitespace-split column holds the label (default: last)."""
        __SCREAMING_SNAKE_CASE : int = label_idx
    def UpperCAmelCase__ ( self : Optional[Any] , _A : Any , _A : Union[Split, str] ):
        """Parse ``{mode}.txt`` under the data dir into a list of InputExample."""
        if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
            __SCREAMING_SNAKE_CASE : int = mode.value
        __SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(_SCREAMING_SNAKE_CASE , F'''{mode}.txt''' )
        __SCREAMING_SNAKE_CASE : str = 1
        __SCREAMING_SNAKE_CASE : Optional[int] = []
        with open(_SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as f:
            __SCREAMING_SNAKE_CASE : Dict = []
            __SCREAMING_SNAKE_CASE : Optional[Any] = []
            for line in f:
                # A blank line or -DOCSTART- ends the current sentence.
                if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) )
                        guid_index += 1
                        __SCREAMING_SNAKE_CASE : Tuple = []
                        __SCREAMING_SNAKE_CASE : str = []
                else:
                    __SCREAMING_SNAKE_CASE : Optional[Any] = line.split(''' ''' )
                    words.append(splits[0] )
                    if len(_SCREAMING_SNAKE_CASE ) > 1:
                        labels.append(splits[self.label_idx].replace('''\n''' , '''''' ) )
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append('''O''' )
            # Flush the trailing sentence if the file does not end with a blank line.
            if words:
                examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) )
        return examples
    def UpperCAmelCase__ ( self : Optional[Any] , _A : TextIO , _A : TextIO , _A : List ):
        """Write predictions next to each input token, mirroring the input layout."""
        __SCREAMING_SNAKE_CASE : Dict = 0
        for line in test_input_reader:
            if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n":
                writer.write(_SCREAMING_SNAKE_CASE )
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                __SCREAMING_SNAKE_CASE : List[str] = line.split()[0] + " " + preds_list[example_id].pop(0 ) + "\n"
                writer.write(_SCREAMING_SNAKE_CASE )
            else:
                # Token had no prediction: sequence was truncated during encoding.
                logger.warning('''Maximum sequence length exceeded: No prediction for \'%s\'.''' , line.split()[0] )
    def UpperCAmelCase__ ( self : int , _A : str ):
        """Load labels from a file (ensuring "O" is present) or fall back to CoNLL-2003 defaults."""
        if path:
            with open(_SCREAMING_SNAKE_CASE , '''r''' ) as f:
                __SCREAMING_SNAKE_CASE : int = f.read().splitlines()
            if "O" not in labels:
                __SCREAMING_SNAKE_CASE : str = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class __UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
    """Chunking task variant: labels live in the second-to-last column, and the
    default label set is the CoNLL-2000 chunk tag inventory."""
    def __init__( self : Union[str, Any] ):
        """Chunk labels are the second-to-last whitespace-split column."""
        super().__init__(label_idx=-2 )
    def UpperCAmelCase__ ( self : Tuple , _A : str ):
        """Load labels from a file (ensuring "O" is present) or use the default chunk tags."""
        if path:
            with open(_SCREAMING_SNAKE_CASE , '''r''' ) as f:
                __SCREAMING_SNAKE_CASE : str = f.read().splitlines()
            if "O" not in labels:
                __SCREAMING_SNAKE_CASE : str = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class __UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
    """POS-tagging task over CoNLL-U files, parsed with ``conllu.parse_incr``;
    words come from the ``form`` field and labels from ``upos``.

    NOTE(review): obfuscated local names below (words/labels/examples/out/s_p)
    are read but never bound under those names — verify against upstream.
    """
    def UpperCAmelCase__ ( self : Optional[int] , _A : int , _A : Union[Split, str] ):
        """Parse ``{mode}.txt`` (CoNLL-U) into a list of InputExample."""
        if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
            __SCREAMING_SNAKE_CASE : str = mode.value
        __SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(_SCREAMING_SNAKE_CASE , F'''{mode}.txt''' )
        __SCREAMING_SNAKE_CASE : Tuple = 1
        __SCREAMING_SNAKE_CASE : int = []
        with open(_SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as f:
            for sentence in parse_incr(_SCREAMING_SNAKE_CASE ):
                __SCREAMING_SNAKE_CASE : str = []
                __SCREAMING_SNAKE_CASE : Union[str, Any] = []
                for token in sentence:
                    words.append(token['''form'''] )
                    labels.append(token['''upos'''] )
                # Every token must carry a UPOS tag.
                assert len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE )
                if words:
                    examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) )
                    guid_index += 1
        return examples
    def UpperCAmelCase__ ( self : Tuple , _A : TextIO , _A : TextIO , _A : List ):
        """Write one line per sentence: ``form (gold_upos|prediction)`` per token."""
        __SCREAMING_SNAKE_CASE : Dict = 0
        for sentence in parse_incr(_SCREAMING_SNAKE_CASE ):
            __SCREAMING_SNAKE_CASE : Dict = preds_list[example_id]
            __SCREAMING_SNAKE_CASE : Tuple = ""
            for token in sentence:
                out += F'''{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '''
            out += "\n"
            writer.write(_SCREAMING_SNAKE_CASE )
            example_id += 1
    def UpperCAmelCase__ ( self : Optional[int] , _A : str ):
        """Load labels from a file, or fall back to the 17 Universal POS tags."""
        if path:
            with open(_SCREAMING_SNAKE_CASE , '''r''' ) as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
| 303 |
"""simple docstring"""
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the two adjacent sorted runs input_list[low:mid] and
    input_list[mid:high + 1] in place and return input_list.

    Named ``merge`` to match the call sites in ``iter_merge_sort``; the
    obfuscated copy assigned the merged run to a throwaway local instead of
    writing it back, so the list was never actually modified.
    """
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    # Repeatedly take the smaller head element; `<=` keeps the sort stable.
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    # Write the merged run — plus whichever side still has leftovers — back.
    input_list[low : high + 1] = result + left + right
    return input_list
def iter_merge_sort(input_list: list) -> list:
    """Sort ``input_list`` with bottom-up (iterative) merge sort and return it.

    The caller's sequence is not mutated: a shallow copy is sorted.  Named
    ``iter_merge_sort`` to match the ``__main__`` call site; the obfuscated
    copy shadowed ``merge`` with a duplicate name and never bound
    low/mid/high, so it could not sort anything.
    """
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)  # work on a copy so the caller's list survives
    # `p` is the window size: merge adjacent runs of length p/2, then double.
    p = 2
    while p <= len(input_list):
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # Final merge of the last two (possibly unequal) parts.
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list
if __name__ == "__main__":
    # Read a comma-separated list of integers from stdin and print it sorted.
    # (The obfuscated copy assigned every local to `__snake_case` but read
    # `user_input` / `unsorted`; the original names are restored here.)
    user_input = input('Enter numbers separated by a comma:\n').strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(',')]
    print(iter_merge_sort(unsorted))
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    """Nearest-neighbour image resizing.

    Each destination pixel is mapped back to its nearest source pixel, so the
    result is blocky but requires no interpolation.  The obfuscated copy
    assigned every attribute to a throwaway local (so ``self.*`` was never
    set) and gave all three methods the same name, leaving only the last one
    defined; the original attribute and method names — which the ``__main__``
    demo below actually calls — are restored here.
    """

    def __init__(self, img, dst_width: int, dst_height: int):
        """Store the source image and precompute the per-axis scaling ratios.

        Raises:
            ValueError: if either destination dimension is negative.
        """
        if dst_width < 0 or dst_height < 0:
            raise ValueError('Destination width/height should be > 0')
        self.img = img
        # img is assumed HxWx3 (rows, cols, channels) — matches cv2.imread output.
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height
        # How many source pixels one destination pixel spans, per axis.
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h
        # Output buffer, initialised to white (255), filled by process().
        self.output = np.ones((self.dst_h, self.dst_w, 3), np.uinta if hasattr(np, 'uinta') else np.uint8) * 2_55

    def process(self):
        """Fill the output buffer by sampling the nearest source pixel."""
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        """Map a destination column index to the nearest source column."""
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """Map a destination row index to the nearest source row."""
        return int(self.ratio_y * y)


# Backward-compatible alias for the obfuscated class name.
A__ = NearestNeighbour
if __name__ == "__main__":
    # Demo: resize a sample image to 800x600 and show it with OpenCV.
    # (The obfuscated copy assigned every local to one name but read
    # `im` / `dst_w` / `dst_h` / `n`; the original names are restored.)
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()
    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
| 325 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class A__ ( unittest.TestCase ):
    '''Slow integration test for FlaxMT5: checks the per-token cross-entropy of
    google/mt5-small against a recorded reference score.'''
    @slow
    def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> str:
        """Compute -seq_len * mean CE for a fixed (input, label) pair and compare."""
        # NOTE(review): obfuscated locals all rebind one identifier; the later
        # reads (model/tokenizer/labels/logits/loss/mtf_score/EXPECTED_SCORE)
        # assume the original distinct names — confirm against upstream.
        __lowerCAmelCase : Optional[Any] = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small")
        __lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("google/mt5-small")
        __lowerCAmelCase : Tuple = tokenizer("Hello there" , return_tensors="np").input_ids
        __lowerCAmelCase : Dict = tokenizer("Hi I am" , return_tensors="np").input_ids
        # Teacher forcing: decoder inputs are the labels shifted right.
        __lowerCAmelCase : str = shift_tokens_right(_SCREAMING_SNAKE_CASE , model.config.pad_token_id , model.config.decoder_start_token_id)
        __lowerCAmelCase : Optional[int] = model(_SCREAMING_SNAKE_CASE , decoder_input_ids=_SCREAMING_SNAKE_CASE).logits
        __lowerCAmelCase : int = optax.softmax_cross_entropy(_SCREAMING_SNAKE_CASE , onehot(_SCREAMING_SNAKE_CASE , logits.shape[-1])).mean()
        __lowerCAmelCase : List[str] = -(labels.shape[-1] * loss.item())
        __lowerCAmelCase : str = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
'''simple docstring'''
def solution(limit: int = 50000000) -> int:
    """Project Euler 87: count numbers below ``limit`` expressible as
    p**2 + q**3 + r**4 with p, q, r prime.

    A sieve yields every prime up to sqrt(limit - 24), since 24 = 2**3 + 2**4
    is the smallest possible cube + fourth-power contribution.  All valid sums
    are collected in a set to de-duplicate multiple representations.  Named
    ``solution`` to match the ``__main__`` call site below.
    """
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))
    # Sieve of Eratosthenes over the odd numbers, plus 2.
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))
    # Iterate in ascending order so the early `break`s below are sound —
    # breaking out of an *unordered* set iteration could skip valid sums.
    ordered_primes = sorted(primes)
    for prime_a in ordered_primes:
        square = prime_a * prime_a
        for prime_b in ordered_primes:
            cube = prime_b * prime_b * prime_b
            # 16 == 2**4 is the smallest fourth power that can still be added.
            if square + cube >= limit - 16:
                break
            for prime_c in ordered_primes:
                tetr = prime_c * prime_c * prime_c * prime_c
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)
    return len(ret)
if __name__ == "__main__":
    # Print the Project Euler 87 answer for the default limit.
    print(f'''{solution() = }''')
| 185 |
"""simple docstring"""
import re
def _lowercase ( __snake_case ) -> str:
if len(re.findall("[ATCG]" ,__snake_case ) ) != len(__snake_case ):
raise ValueError("Invalid Strand" )
return dna.translate(dna.maketrans("ATCG" ,"TAGC" ) )
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x):
    """Return ``x`` unchanged if it is already iterable, else duplicate it into
    a pair ``(x, x)`` — the usual helper for (height, width) style arguments.

    The obfuscated copy took an unused parameter and read the undefined names
    ``__snake_case`` / ``x``; the test mixin below calls this as ``to_atuple``.
    """
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)


# Backward-compatible alias for the obfuscated function name.
lowerCamelCase__ = to_atuple
@require_flax
class _lowerCAmelCase :
    """Test mixin for FlaxVisionTextDualEncoderModel: subclasses supply the
    vision/text sub-models and inputs; this mixin checks output shapes,
    save/load round-trips, attention shapes, and PyTorch<->Flax equivalence.

    NOTE(review): obfuscation gave every method the same name ``__a`` — at
    class-creation time only the *last* definition survives, and the
    entry-point names the comments below mention (check_save_load, etc.) come
    from the upstream transformers test; confirm before relying on them.
    Local reads (model, output, config, ...) likewise assume the original
    distinct names that the single obfuscated identifier replaced.
    """
    def __a ( self , _UpperCamelCase , _UpperCamelCase ) -> str:
        pass
    def __a ( self ) -> int:
        pass
    def __a ( self ) -> Tuple:
        pass
    # assert_almost_equals: max-abs-difference comparison with tolerance.
    def __a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[Any]:
        lowerCAmelCase_ = np.abs((a - b) ).max()
        self.assertLessEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , f"""Difference between torch and flax is {diff} (>= {tol}).""" )
    # check_model_from_pretrained_configs: build from configs, check embed shapes.
    def __a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None , **_UpperCamelCase ) -> List[str]:
        lowerCAmelCase_ = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        lowerCAmelCase_ = FlaxVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE )
        lowerCAmelCase_ = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
        self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) )
        self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) )
    # check_vision_text_dual_encoder_from_pretrained: build from sub-models.
    def __a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None , **_UpperCamelCase ) -> Dict:
        lowerCAmelCase_ = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        lowerCAmelCase_ = {"vision_model": vision_model, "text_model": text_model}
        lowerCAmelCase_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE )
        lowerCAmelCase_ = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
        self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
    # check_save_load: outputs must survive a save_pretrained/from_pretrained round-trip.
    def __a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None , **_UpperCamelCase ) -> str:
        lowerCAmelCase_ = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        lowerCAmelCase_ = {"vision_model": vision_model, "text_model": text_model}
        lowerCAmelCase_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE )
        lowerCAmelCase_ = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
        lowerCAmelCase_ = output[0]
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(_SCREAMING_SNAKE_CASE )
            lowerCAmelCase_ = FlaxVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE )
            lowerCAmelCase_ = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
            lowerCAmelCase_ = after_output[0]
            lowerCAmelCase_ = np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1e-3 )
    # check_vision_text_output_attention: attention shapes for both towers.
    def __a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None , **_UpperCamelCase ) -> Optional[int]:
        lowerCAmelCase_ = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        lowerCAmelCase_ = {"vision_model": vision_model, "text_model": text_model}
        lowerCAmelCase_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE )
        lowerCAmelCase_ = model(
            input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE )
        lowerCAmelCase_ = output.vision_model_output.attentions
        self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , vision_config.num_hidden_layers )
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        lowerCAmelCase_ = to_atuple(vision_model.config.image_size )
        lowerCAmelCase_ = to_atuple(vision_model.config.patch_size )
        lowerCAmelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        lowerCAmelCase_ = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        lowerCAmelCase_ = output.text_model_output.attentions
        self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    # check_pt_flax_equivalence: same outputs from PT and Flax, both directions.
    def __a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> str:
        pt_model.to(_SCREAMING_SNAKE_CASE )
        pt_model.eval()
        # prepare inputs
        lowerCAmelCase_ = inputs_dict
        lowerCAmelCase_ = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
        with torch.no_grad():
            lowerCAmelCase_ = pt_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
        lowerCAmelCase_ = fx_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
        self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , "Output lengths differ between Flax and PyTorch" )
        for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
            self.assert_almost_equals(_SCREAMING_SNAKE_CASE , pt_output.numpy() , 4e-2 )
        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(_SCREAMING_SNAKE_CASE )
            lowerCAmelCase_ = FlaxVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
        lowerCAmelCase_ = fx_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple()
        self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , "Output lengths differ between Flax and PyTorch" )
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
            self.assert_almost_equals(_SCREAMING_SNAKE_CASE , pt_output.numpy() , 4e-2 )
        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(_SCREAMING_SNAKE_CASE )
            lowerCAmelCase_ = VisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_flax=_SCREAMING_SNAKE_CASE )
        pt_model_loaded.to(_SCREAMING_SNAKE_CASE )
        pt_model_loaded.eval()
        with torch.no_grad():
            lowerCAmelCase_ = pt_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple()
        self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , "Output lengths differ between Flax and PyTorch" )
        for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
            self.assert_almost_equals(_SCREAMING_SNAKE_CASE , pt_output_loaded.numpy() , 4e-2 )
    # check_equivalence_pt_to_flax: convert PT weights into a Flax model, then compare.
    def __a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[Any]:
        lowerCAmelCase_ = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        lowerCAmelCase_ = VisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE )
        lowerCAmelCase_ = FlaxVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE )
        lowerCAmelCase_ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _SCREAMING_SNAKE_CASE )
        lowerCAmelCase_ = fx_state
        self.check_pt_flax_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    # check_equivalence_flax_to_pt: load Flax weights into a PT model, then compare.
    def __a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> str:
        lowerCAmelCase_ = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        lowerCAmelCase_ = VisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE )
        lowerCAmelCase_ = FlaxVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE )
        lowerCAmelCase_ = load_flax_weights_in_pytorch_model(_SCREAMING_SNAKE_CASE , fx_model.params )
        self.check_pt_flax_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    # The remaining methods are the unittest entry points driving the checks above.
    def __a ( self ) -> str:
        lowerCAmelCase_ = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**_SCREAMING_SNAKE_CASE )
    def __a ( self ) -> int:
        lowerCAmelCase_ = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**_SCREAMING_SNAKE_CASE )
    def __a ( self ) -> List[Any]:
        lowerCAmelCase_ = self.prepare_config_and_inputs()
        self.check_save_load(**_SCREAMING_SNAKE_CASE )
    def __a ( self ) -> Dict:
        lowerCAmelCase_ = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**_SCREAMING_SNAKE_CASE )
    @is_pt_flax_cross_test
    def __a ( self ) -> Any:
        lowerCAmelCase_ = self.prepare_config_and_inputs()
        lowerCAmelCase_ = config_inputs_dict.pop("vision_config" )
        lowerCAmelCase_ = config_inputs_dict.pop("text_config" )
        lowerCAmelCase_ = config_inputs_dict
        self.check_equivalence_pt_to_flax(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        self.check_equivalence_flax_to_pt(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    @slow
    def __a ( self ) -> Dict:
        lowerCAmelCase_ = self.get_pretrained_model_and_inputs()
        lowerCAmelCase_ = model_a(**_SCREAMING_SNAKE_CASE )
        lowerCAmelCase_ = outputs[0]
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_a.save_pretrained(_SCREAMING_SNAKE_CASE )
            lowerCAmelCase_ = FlaxVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE )
            lowerCAmelCase_ = model_a(**_SCREAMING_SNAKE_CASE )
            lowerCAmelCase_ = after_outputs[0]
            lowerCAmelCase_ = np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1e-5 )
@require_flax
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Concrete mixin instantiation: Flax ViT vision tower + BERT text tower.

    NOTE(review): obfuscated locals below all rebind one identifier; later
    reads (model, batch_size, pixel_values, ...) assume the original names.
    """
    # get_pretrained_model_and_inputs (upstream name): tiny ViT+BERT model plus random inputs.
    def __a ( self ) -> List[Any]:
        lowerCAmelCase_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-bert" , vision_from_pt=_SCREAMING_SNAKE_CASE , text_from_pt=_SCREAMING_SNAKE_CASE , )
        lowerCAmelCase_ = 13
        lowerCAmelCase_ = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        lowerCAmelCase_ = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
        lowerCAmelCase_ = random_attention_mask([batch_size, 4] )
        lowerCAmelCase_ = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    # get_vision_text_model (upstream name): instantiate the two towers from configs.
    def __a ( self , _UpperCamelCase , _UpperCamelCase ) -> Optional[int]:
        lowerCAmelCase_ = FlaxViTModel(_SCREAMING_SNAKE_CASE )
        lowerCAmelCase_ = FlaxBertModel(_SCREAMING_SNAKE_CASE )
        return vision_model, text_model
    # prepare_config_and_inputs (upstream name): configs + inputs from the sub-model testers.
    def __a ( self ) -> int:
        lowerCAmelCase_ = FlaxViTModelTester(self )
        lowerCAmelCase_ = FlaxBertModelTester(self )
        lowerCAmelCase_ = vit_model_tester.prepare_config_and_inputs()
        lowerCAmelCase_ = bert_model_tester.prepare_config_and_inputs()
        lowerCAmelCase_ = vision_config_and_inputs
        lowerCAmelCase_ = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Concrete mixin instantiation: Flax CLIP vision tower + BERT text tower.

    NOTE(review): obfuscated locals below all rebind one identifier; later
    reads (model, batch_size, pixel_values, ...) assume the original names.
    """
    # get_pretrained_model_and_inputs (upstream name): tiny CLIP+BERT model plus random inputs.
    def __a ( self ) -> List[str]:
        lowerCAmelCase_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip" , "hf-internal-testing/tiny-bert" , vision_from_pt=_SCREAMING_SNAKE_CASE , text_from_pt=_SCREAMING_SNAKE_CASE , )
        lowerCAmelCase_ = 13
        lowerCAmelCase_ = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        lowerCAmelCase_ = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
        lowerCAmelCase_ = random_attention_mask([batch_size, 4] )
        lowerCAmelCase_ = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    # get_vision_text_model (upstream name): instantiate the two towers from configs.
    def __a ( self , _UpperCamelCase , _UpperCamelCase ) -> int:
        lowerCAmelCase_ = FlaxCLIPVisionModel(_SCREAMING_SNAKE_CASE )
        lowerCAmelCase_ = FlaxBertModel(_SCREAMING_SNAKE_CASE )
        return vision_model, text_model
    # prepare_config_and_inputs (upstream name): configs + inputs from the sub-model testers.
    def __a ( self ) -> Optional[Any]:
        lowerCAmelCase_ = FlaxCLIPVisionModelTester(self )
        lowerCAmelCase_ = FlaxBertModelTester(self )
        lowerCAmelCase_ = clip_model_tester.prepare_config_and_inputs()
        lowerCAmelCase_ = bert_model_tester.prepare_config_and_inputs()
        lowerCAmelCase_ = vision_config_and_inputs
        lowerCAmelCase_ = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
    """Slow integration test: pretrained clip-italian dual encoder, end to end."""
    @slow
    def __a ( self ) -> List[str]:
        """Score two Italian captions against one COCO image and compare to known logits."""
        lowerCAmelCase_ = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian" , logit_scale_init_value=1.0 )
        lowerCAmelCase_ = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" )
        lowerCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        lowerCAmelCase_ = processor(
            text=["una foto di un gatto", "una foto di un cane"] , images=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , return_tensors="np" )
        lowerCAmelCase_ = model(**_SCREAMING_SNAKE_CASE )
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
        self.assertEqual(
            outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
        # Reference logits recorded from a known-good run of this checkpoint.
        lowerCAmelCase_ = np.array([[1.2284727, 0.3104122]] )
        self.assertTrue(np.allclose(outputs.logits_per_image , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
| 231 |
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    '''Unit tests for the Funnel WordPiece tokenizer (slow + fast variants),
    built on a tiny handwritten vocabulary written to a temp dir in setUp.'''
    SCREAMING_SNAKE_CASE = FunnelTokenizer
    SCREAMING_SNAKE_CASE = FunnelTokenizerFast
    SCREAMING_SNAKE_CASE = True
    SCREAMING_SNAKE_CASE = True
    def _SCREAMING_SNAKE_CASE ( self: Tuple) -> Optional[int]:
        """Write a minimal WordPiece vocab file into the test temp dir."""
        super().setUp()
        __lowerCAmelCase : str = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        __lowerCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , **_SCREAMING_SNAKE_CASE: Union[str, Any]) -> int:
        """Instantiate the slow tokenizer from the temp vocab."""
        return FunnelTokenizer.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE)
    def _SCREAMING_SNAKE_CASE ( self: Any , **_SCREAMING_SNAKE_CASE: Any) -> str:
        """Instantiate the fast tokenizer from the temp vocab."""
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE)
    def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: str) -> Any:
        """Provide the (raw, expected-cleaned) text pair used by shared tests."""
        __lowerCAmelCase : Union[str, Any] = "UNwant\u00E9d,running"
        __lowerCAmelCase : str = "unwanted, running"
        return input_text, output_text
    def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> List[str]:
        """Tokenization splits into the expected WordPiece pieces and ids."""
        __lowerCAmelCase : Any = self.tokenizer_class(self.vocab_file)
        __lowerCAmelCase : Any = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(_SCREAMING_SNAKE_CASE , ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE) , [7, 4, 5, 10, 8, 9])
    def _SCREAMING_SNAKE_CASE ( self: List[str]) -> List[str]:
        """Funnel token_type_ids: 2 marks the CLS position, then 0s / 1s per segment."""
        __lowerCAmelCase : Optional[Any] = self.get_tokenizers(do_lower_case=_SCREAMING_SNAKE_CASE)
        for tokenizer in tokenizers:
            __lowerCAmelCase : List[str] = tokenizer("UNwant\u00E9d,running")
            __lowerCAmelCase : Optional[int] = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len)
            __lowerCAmelCase : List[str] = tokenizer("UNwant\u00E9d,running" , "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len + [1] * sentence_len)
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
__A = {'UserAgent': UserAgent().random}
def extract_user_profile(script) -> dict:
    """Pull the user-profile dict out of an Instagram page ``<script>`` tag.

    The tag's text holds ``window._sharedData = {...};``; the JSON object is
    sliced out (from the ``{"config"`` key up to the trailing ``;``) and
    parsed.  The obfuscated copy read an undefined name ``script`` while the
    parameter was called ``lowercase__``; the in-file callers use this
    function as ``extract_user_profile``, so that name is restored.
    """
    data = script.contents[0]
    info = json.loads(data[data.find("{\"config\"") : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class lowerCamelCase__ :
    """Scrape public profile data for an Instagram username.

    NOTE(review): obfuscation rebinds locals/attributes to one identifier —
    ``self.url`` / ``self.user_data`` / ``self.username`` are read below but
    never bound under those names here; verify against the upstream script.
    """
    def __init__( self , SCREAMING_SNAKE_CASE ):
        """Build the profile URL for the given username and fetch its data."""
        snake_case : Optional[Any] = F'''https://www.instagram.com/{username}/'''
        snake_case : Optional[Any] = self.get_json()
    def lowerCamelCase_ ( self ):
        """Fetch the profile page and extract the embedded JSON user blob.

        The payload's script index varies (4th or 3rd tag), hence the fallback.
        """
        snake_case : Union[str, Any] = requests.get(self.url , headers=_SCREAMING_SNAKE_CASE ).text
        snake_case : Any = BeautifulSoup(_SCREAMING_SNAKE_CASE , "html.parser" ).find_all("script" )
        try:
            return extract_user_profile(scripts[4] )
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3] )
    def __repr__( self ):
        """Debug representation: ClassName('username')."""
        return F'''{self.__class__.__name__}(\'{self.username}\')'''
    def __str__( self ):
        """Human-readable summary: full name, handle and biography."""
        return F'''{self.fullname} ({self.username}) is {self.biography}'''
    @property
    def lowerCamelCase_ ( self ):
        """The account handle."""
        return self.user_data["username"]
    @property
    def lowerCamelCase_ ( self ):
        """The account's display (full) name."""
        return self.user_data["full_name"]
    @property
    def lowerCamelCase_ ( self ):
        """The profile biography text."""
        return self.user_data["biography"]
    @property
    def lowerCamelCase_ ( self ):
        """The business contact e-mail, if published."""
        return self.user_data["business_email"]
    @property
    def lowerCamelCase_ ( self ):
        """The external website link on the profile."""
        return self.user_data["external_url"]
    @property
    def lowerCamelCase_ ( self ):
        """Number of followers."""
        return self.user_data["edge_followed_by"]["count"]
    @property
    def lowerCamelCase_ ( self ):
        """Number of accounts this user follows."""
        return self.user_data["edge_follow"]["count"]
    @property
    def lowerCamelCase_ ( self ):
        """Number of posts on the timeline."""
        return self.user_data["edge_owner_to_timeline_media"]["count"]
    @property
    def lowerCamelCase_ ( self ):
        """URL of the high-definition profile picture."""
        return self.user_data["profile_pic_url_hd"]
    @property
    def lowerCamelCase_ ( self ):
        """Whether the account carries a verification badge."""
        return self.user_data["is_verified"]
    @property
    def lowerCamelCase_ ( self ):
        """Whether the account is private."""
        return self.user_data["is_private"]
def UpperCamelCase__ ( lowercase__ : Dict = "github" ):
    """Network smoke test: scrape a profile and sanity-check its fields.

    Skipped on CI; the strict assertions only run for the "github" account.
    NOTE(review): the parameter is obfuscated to ``lowercase__`` but the body
    reads ``username`` — confirm against the upstream test function.
    """
    import os
    if os.environ.get("CI" ):
        return  # test failing on GitHub Actions
    snake_case : Optional[Any] = InstagramUser(__snake_case )
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data , __snake_case )
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 12_0000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram." )
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False
if __name__ == "__main__":
    # Run doctests, then scrape and print the GitHub account's public stats.
    import doctest
    doctest.testmod()
    # NOTE(review): obfuscation assigns to `__A` but the prints read
    # `instagram_user`, and `InstagramUser` is the pre-obfuscation class name.
    __A = InstagramUser("github")
    print(instagram_user)
    print(f'{instagram_user.number_of_posts = }')
    print(f'{instagram_user.number_of_followers = }')
    print(f'{instagram_user.number_of_followings = }')
    print(f'{instagram_user.email = }')
    print(f'{instagram_user.website = }')
    print(f'{instagram_user.profile_picture_url = }')
    print(f'{instagram_user.is_verified = }')
    print(f'{instagram_user.is_private = }')
| 148 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def _lowercase(product: str = "laptop") -> DataFrame:
    """Scrape the first Amazon-India search-results page for *product*.

    Returns a :class:`pandas.DataFrame` with one row per listed product:
    title, link, current price, rating, MRP and the computed discount (%).

    Network I/O: performs one HTTP GET against amazon.in per call.

    Fixes over the obfuscated original: every local was assigned to the same
    throwaway name while later statements read the real names
    (``product_title``, ``product_mrp``, ...), which raised ``NameError``;
    ``item.ha`` is restored to ``item.h2``; the bare ``pass`` on a failed
    entry is replaced by ``continue`` so stale/unbound locals are never
    appended to the frame.
    """
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each search-result entry and store it in the dataframe.
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find("span", attrs={"class": "a-price a-text-price"}).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                # Discount in percent, computed from MRP vs. current price.
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            # Malformed entry (e.g. an ad card) — skip it entirely.
            continue
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        # Blank out empty price/MRP cells so the CSV stays readable.
        data_frame.loc[data_frame["Current Price of the product"] == "", "Current Price of the product"] = " "
        data_frame.loc[data_frame["MRP of the product"] == "", "MRP of the product"] = " "
        data_frame.index += 1
    return data_frame
if __name__ == "__main__":
    # The obfuscated guard bound the product name to `__snake_case` and called
    # an undefined `get_amazon_product_data`; the scraper above is named
    # `_lowercase` in this file.
    product = "headphones"
    _lowercase(product).to_csv(f"Amazon Product Data for {product}.csv")
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
# Maps the requested number of image embeddings (1-9) to the (rows, cols)
# output grid of the adaptive average pool. The class below reads this as
# POOLING_BREAKDOWN, but the obfuscated file bound it to `snake_case_`.
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    """ResNet backbone that turns an image batch into N pooled 2048-d embeddings.

    Fixes over the obfuscated original: bodies referenced an undefined
    ``_SCREAMING_SNAKE_CASE`` instead of their parameters; the backbone/pool
    names were garbled (``resnetaaa`` / ``AdaptiveAvgPoolad`` — presumably
    ``resnet152`` / ``AdaptiveAvgPool2d``, TODO confirm against upstream);
    and the forward method must be named ``forward`` for ``nn.Module``.
    """

    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        # Drop the final avgpool + fc layers; keep the convolutional trunk.
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, input_modal):
        # (B, 2048, H', W') -> pooled grid -> flatten spatial dims -> (B, N, 2048)
        out = self.pool(self.model(input_modal))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    """Multimodal (text + image) dataset backed by a JSON-lines file.

    Each line must be a JSON object with ``text``, ``img`` (path relative to
    the jsonl file) and ``label`` (list of label names) keys.

    Fixes over the obfuscated original: the base class and most local
    references were undefined names, the multi-target assignment in
    ``__getitem__`` was collapsed onto a single name, the one-hot label
    subscript was dropped, and the jsonl file handle was never closed.
    """

    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        with open(data_path) as jsonl_file:  # close the handle (original leaked it)
            self.data = [json.loads(line) for line in jsonl_file]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(
            self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True)
        )
        # Separate the special start/end tokens from the sentence body.
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        # Multi-hot label vector: set a 1 for every label named on this row.
        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        """Count how often each label name occurs across the dataset."""
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    """Collate JsonlDataset rows into padded batch tensors.

    Pads every ``sentence`` tensor to the longest one in *batch* (pad id 0)
    and builds the matching attention mask; images, labels and the image
    start/end tokens are simply stacked.

    Returns ``(text, mask, image, img_start_token, img_end_token, target)``.
    (The obfuscated original assigned everything to one throwaway name and
    never wrote the padded rows into the tensors.)
    """
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
    """Return the fixed list of 23 MM-IMDB genre labels, in canonical order.

    (Renamed from the obfuscated ``A``, which collided with the sibling
    helpers defined under the same name and was shadowed.)
    """
    return [
        "Crime",
        "Drama",
        "Thriller",
        "Action",
        "Comedy",
        "Romance",
        "Documentary",
        "Short",
        "Mystery",
        "History",
        "Family",
        "Adventure",
        "Fantasy",
        "Sci-Fi",
        "Western",
        "Horror",
        "Sport",
        "War",
        "Music",
        "Musical",
        "Animation",
        "Biography",
        "Film-Noir",
    ]
def get_image_transforms():
    """Return the eval-time image pipeline: resize, center-crop 224, to-tensor, normalize.

    The mean/std constants are the dataset-specific normalization values the
    model was trained with. (Renamed from the obfuscated ``A`` to avoid
    shadowing the sibling helpers of the same name.)
    """
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46_777_044, 0.44_531_429, 0.40_661_017],
                std=[0.12_221_994, 0.12_145_835, 0.14_380_469],
            ),
        ]
    )
| 51 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.g4dn.xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.g4dn.xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9},
        },
    ]
)
class A__(unittest.TestCase):
    """Single-node SageMaker training smoke test, parameterized per framework.

    Requires the ``sm_env`` fixture (role, image URI, paths, hyperparameters)
    and only runs when ``TEST_SAGEMAKER=True``. The obfuscated original gave
    every method the same name (so only the last survived) and referenced an
    undefined ``_SCREAMING_SNAKE_CASE`` for several keyword values — restored
    here to ``check=True`` / ``debugger_hook_config=False`` / the open file
    handle; TODO confirm against upstream transformers test_single_node_gpu.
    """

    def setUp(self):
        # The PyTorch job runs the example script from the test workspace.
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        """Build a single-node HuggingFace SageMaker estimator for this config."""
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        """Export the training job's metric stream to a CSV in the test workspace."""
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        estimator = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job; this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump test results into a json file to share in the PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump(
                {"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss},
                outfile,
            )
def largest_square_area_in_matrix_top_down(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Side length of the largest all-ones square in *mat*, plain recursion.

    Exponential time; kept as the baseline for the memoized/DP variants below.
    (The obfuscated original declared three parameters with the same name —
    a SyntaxError — and recursed through an undefined ``__snake_case``.)
    """

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE: off the grid contributes nothing.
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            # Square ending here extends the smallest of its three neighbours.
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        return 0

    # One-element list so the nested function can mutate the running maximum.
    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_memoized(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Side length of the largest all-ones square, top-down with memoization.

    O(rows * cols) time via a ``dp_array`` cache (-1 marks "not computed").
    (Fixes the obfuscated original's duplicate parameter names and undefined
    recursive-call targets.)
    """

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Side length of the largest all-ones square, bottom-up DP table.

    O(rows * cols) time, O(rows * cols) space. The table is padded by one
    row/column of zeros so the border needs no special cases.
    (Fixes the obfuscated original's duplicate parameter names and the
    undefined names inside ``min``/``max``; named so the ``__main__`` guard
    below resolves.)
    """
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    # Sweep from the bottom-right corner so neighbours are already computed.
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization(
    rows: int, cols: int, mat: list[list[int]]
) -> int:
    """Side length of the largest all-ones square, bottom-up DP with O(cols) space.

    Keeps only the current and next DP rows. Bug fixed versus the original:
    ``next_row = current_row`` aliased the two rows, so the "diagonal" lookup
    read a value already overwritten in the current sweep and over-counted
    (e.g. returned 3 instead of 2 for a 3x3 grid with one zero corner) — the
    row must be *copied*.
    """
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # Snapshot the finished row: a copy, NOT an alias of current_row.
        next_row = current_row[:]
    return largest_square_area
if __name__ == "__main__":
    import doctest

    # Run module doctests, then print a quick sanity check: the largest
    # all-ones square in a 2x2 matrix of ones has side length 2.
    # NOTE(review): this expects a function named
    # `largest_square_area_in_matrix_bottom_up`; the obfuscated definitions
    # above are all named `A` — confirm the intended target.
    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 71 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
# Lazy-import table: submodule name -> public symbols it exports.
# Must be named `_import_structure` — the _LazyModule call below reads it,
# but the obfuscated file bound it to `__snake_case` (NameError at import).
_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}

if TYPE_CHECKING:
    # Real imports only for type checkers; at runtime the lazy module defers them.
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeqaSeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    # Replace this module in sys.modules with the lazy proxy (the original
    # bound the proxy to a throwaway name, so laziness never took effect).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
SCREAMING_SNAKE_CASE__:Dict = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
SCREAMING_SNAKE_CASE__:int = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
SCREAMING_SNAKE_CASE__:List[Any] = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class snake_case__(datasets.Metric):
    """Pearson correlation metric wrapping :func:`scipy.stats.pearsonr`.

    Fixes over the obfuscated original: ``_compute`` declared three parameters
    with the same name (SyntaxError), and both methods shared one obfuscated
    name so the first was shadowed — `datasets.Metric` dispatches on the
    ``_info`` / ``_compute`` names restored here.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        # pearsonr is symmetric in its two arguments, so argument order does
        # not affect the coefficient; the p-value is returned only on request.
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 261 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__snake_case : Optional[int] = logging.get_logger(__name__)
class A__(BackboneConfigMixin, PretrainedConfig):
    """Configuration for the MaskFormer Swin backbone.

    Fixes over the obfuscated original: the two base classes were an undefined
    name (restored to the classes imported at the top of this file); the two
    class attributes shared one name so ``model_type`` was shadowed; and every
    ``__init__`` parameter was declared as ``_SCREAMING_SNAKE_CASE`` — a
    SyntaxError from duplicate argument names. Parameter names/defaults are
    reconstructed from the attribute assignments; TODO confirm against the
    upstream transformers MaskFormerSwinConfig.
    """

    model_type = "maskformer-swin"
    # Map generic config attribute names onto this model's field names.
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def _a():
    """Run the TensorFlow benchmark from CLI arguments.

    Parses ``TensorFlowBenchmarkArguments`` twice: the second parse catches the
    ``ValueError`` raised for deprecated ``--no_*`` flags and rewrites it into
    a friendlier message distinguishing renamed flags from genuinely unknown
    ones. (The obfuscated original assigned every local to a throwaway name
    and then read the real names — NameError — and its ``__main__`` guard
    called an undefined ``main``.)
    """
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
            raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    _a()
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    """Tracks peak RSS of the current process on a background thread.

    Usage: ``start()`` spawns a daemon thread that busy-polls
    ``psutil.Process().memory_info().rss``; ``stop()`` joins it and returns
    the peak observed (bytes). Fixes over the obfuscated original: attributes
    were assigned to throwaway locals instead of ``self`` (AttributeError on
    first use), and all three public methods shared one obfuscated name so
    only the last survived; the class is renamed so the module-level
    ``PeakCPUMemory()`` instantiation below resolves.
    """

    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        """Poll loop run on the background thread."""
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        """Begin monitoring on a daemon thread."""
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        """Stop monitoring and return the peak RSS seen, in bytes."""
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak
# Module-level singleton read by the measure functions below as
# `cpu_peak_tracker` (the obfuscated file bound it to `__snake_case`,
# leaving that name undefined).
cpu_peak_tracker = PeakCPUMemory()
def _lowercase():
    """Start a measurement pass and return the baseline readings.

    Returns a dict with the wall-clock ``time``, CPU RSS under ``"cpu"`` and
    per-GPU allocated bytes under ``str(i)``; also arms the CPU peak tracker
    and resets CUDA peak statistics. (The obfuscated original bound every
    value to a throwaway name and returned an undefined ``measures``.)
    """
    # Time
    measures = {"time": time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()
    return measures
def _lowercase(start_measures):
    """Finish a measurement pass started by the function above.

    Returns deltas against *start_measures*: elapsed seconds under ``time``,
    CPU RSS delta (MiB) under ``"cpu"``, CPU peak delta (MiB) under
    ``"cpu-peak"``, and per-GPU allocated/peak deltas (MiB) under ``str(i)``
    and ``f"{i}-peak"``. (The obfuscated original never stored any of these
    keys, so the result dict was undefined/empty.)
    """
    # Time
    measures = {"time": time.time() - start_measures["time"]}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20
    return measures
def _lowercase ( __snake_case ,__snake_case ) -> Dict:
print(F"""{description}:""" )
print(F"""- Time: {measures['time']:.2f}s""" )
for i in range(torch.cuda.device_count() ):
print(F"""- GPU {i} allocated: {measures[str(__snake_case )]:.2f}MiB""" )
__lowerCAmelCase : Optional[Any] = measures[F"""{i}-peak"""]
print(F"""- GPU {i} peak: {peak:.2f}MiB""" )
print(F"""- CPU RAM allocated: {measures['cpu']:.2f}MiB""" )
print(F"""- CPU RAM peak: {measures['cpu-peak']:.2f}MiB""" ) | 269 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.