"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class __A :
'''simple docstring'''
lowerCAmelCase : List[str] = XGLMConfig
lowerCAmelCase : Tuple = {}
lowerCAmelCase : List[str] = "gelu"
def __init__( self : Tuple ,_snake_case : int ,_snake_case : int=14 ,_snake_case : Union[str, Any]=7 ,_snake_case : Dict=True ,_snake_case : str=True ,_snake_case : Optional[Any]=True ,_snake_case : Tuple=99 ,_snake_case : Dict=32 ,_snake_case : int=2 ,_snake_case : Any=4 ,_snake_case : Optional[int]=37 ,_snake_case : Optional[int]="gelu" ,_snake_case : List[str]=0.1 ,_snake_case : Union[str, Any]=0.1 ,_snake_case : str=512 ,_snake_case : Optional[int]=0.02 ,) -> Dict:
"""simple docstring"""
lowercase__ : int = parent
lowercase__ : Union[str, Any] = batch_size
lowercase__ : List[str] = seq_length
lowercase__ : Any = is_training
lowercase__ : Dict = use_input_mask
lowercase__ : Union[str, Any] = use_labels
lowercase__ : List[Any] = vocab_size
lowercase__ : List[str] = d_model
lowercase__ : Tuple = num_hidden_layers
lowercase__ : Any = num_attention_heads
lowercase__ : Optional[Any] = ffn_dim
lowercase__ : Union[str, Any] = activation_function
lowercase__ : Optional[int] = activation_dropout
lowercase__ : Tuple = attention_dropout
lowercase__ : int = max_position_embeddings
lowercase__ : List[str] = initializer_range
lowercase__ : Any = None
lowercase__ : Any = 0
lowercase__ : Union[str, Any] = 2
lowercase__ : Tuple = 1
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return XGLMConfig.from_pretrained('''facebook/xglm-564M''' )
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) ,clip_value_min=0 ,clip_value_max=3 )
lowercase__ : Optional[Any] = None
if self.use_input_mask:
lowercase__ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Union[str, Any] = self.get_config()
lowercase__ : List[Any] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def UpperCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return XGLMConfig(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,num_layers=self.num_hidden_layers ,attention_heads=self.num_attention_heads ,ffn_dim=self.ffn_dim ,activation_function=self.activation_function ,activation_dropout=self.activation_dropout ,attention_dropout=self.attention_dropout ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,use_cache=_snake_case ,bos_token_id=self.bos_token_id ,eos_token_id=self.eos_token_id ,pad_token_id=self.pad_token_id ,return_dict=_snake_case ,)
def UpperCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, input_mask, head_mask = config_and_inputs
inputs_dict = {
'''input_ids''': input_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_tf
class __A ( A_ ,A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : List[Any] = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
lowerCAmelCase : List[str] = (TFXGLMForCausalLM,) if is_tf_available() else ()
lowerCAmelCase : List[str] = (
{"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
)
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : Optional[int] = False
def UpperCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
lowercase__ : str = TFXGLMModelTester(self )
lowercase__ : int = ConfigTester(self ,config_class=_snake_case ,n_embd=37 )
def UpperCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
@slow
def UpperCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Union[str, Any] = TFXGLMModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
@unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''' )
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
super().test_resize_token_embeddings()
@require_tf
class __A ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase ( self : List[str] ,_snake_case : List[str]=True ) -> List[str]:
"""simple docstring"""
lowercase__ : List[str] = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
lowercase__ : Optional[Any] = tf.convert_to_tensor([[2, 268, 9_865]] ,dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
lowercase__ : Tuple = [2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581]
# fmt: on
lowercase__ : Tuple = model.generate(_snake_case ,do_sample=_snake_case ,num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() ,_snake_case )
@slow
def UpperCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
lowercase__ : Dict = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
lowercase__ : Optional[int] = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
tf.random.set_seed(0 )
lowercase__ : Tuple = tokenizer('''Today is a nice day and''' ,return_tensors='''tf''' )
lowercase__ : str = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and ensure the same output regardless of the available devices)
with tf.device(''':/CPU:0''' ):
lowercase__ : Dict = model.generate(_snake_case ,do_sample=_snake_case ,seed=[7, 0] )
lowercase__ : Optional[Any] = tokenizer.decode(output_ids[0] ,skip_special_tokens=_snake_case )
lowercase__ : Tuple = (
'''Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'''
)
self.assertEqual(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
lowercase__ : Tuple = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
lowercase__ : Union[str, Any] = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
lowercase__ : Dict = '''left'''
# use different length sentences to test batching
lowercase__ : Dict = [
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When''',
'''Hello, my dog is a little''',
]
lowercase__ : Dict = tokenizer(_snake_case ,return_tensors='''tf''' ,padding=_snake_case )
lowercase__ : Dict = inputs['''input_ids''']
lowercase__ : Tuple = model.generate(input_ids=_snake_case ,attention_mask=inputs['''attention_mask'''] ,max_new_tokens=12 )
lowercase__ : Union[str, Any] = tokenizer(sentences[0] ,return_tensors='''tf''' ).input_ids
lowercase__ : str = model.generate(input_ids=_snake_case ,max_new_tokens=12 )
lowercase__ : Tuple = tokenizer(sentences[1] ,return_tensors='''tf''' ).input_ids
lowercase__ : List[Any] = model.generate(input_ids=_snake_case ,max_new_tokens=12 )
lowercase__ : Any = tokenizer.batch_decode(_snake_case ,skip_special_tokens=_snake_case )
lowercase__ : Optional[int] = tokenizer.decode(output_non_padded[0] ,skip_special_tokens=_snake_case )
lowercase__ : Any = tokenizer.decode(output_padded[0] ,skip_special_tokens=_snake_case )
lowercase__ : List[Any] = [
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '''
'''a single''',
'''Hello, my dog is a little bit of a shy one, but he is very friendly''',
]
self.assertListEqual(_snake_case ,_snake_case )
self.assertListEqual(_snake_case ,[non_padded_sentence, padded_sentence] )
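# Hedged sketch (not part of the original tests): a minimal illustration of the left-padding
# setup exercised by the batched-generation test above. For a decoder-only model, padding must
# sit on the left so that the last position of every row is a real token; the attention mask
# zeros out the pads. The checkpoint name is taken from the tests; the sentences are illustrative.
if __name__ == "__main__":
    from transformers import XGLMTokenizer

    tok = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
    tok.padding_side = "left"
    batch = tok(["Hello, my dog is a little", "Hi"], return_tensors="tf", padding=True)
    print(batch["input_ids"].shape)    # (2, longest_sequence_length)
    print(batch["attention_mask"][1])  # leading zeros mark the left padding of the shorter row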
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class __A ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase ( self : List[str] ) -> Any:
"""simple docstring"""
lowercase__ : List[str] = FlaxXLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
lowercase__ : List[str] = AutoTokenizer.from_pretrained('''xlm-roberta-base''' )
lowercase__ : List[str] = '''The dog is cute and lives in the garden house'''
lowercase__ : int = jnp.array([tokenizer.encode(_snake_case )] )
lowercase__ : Any = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
lowercase__ : Tuple = jnp.array(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
lowercase__ : Optional[Any] = model(_snake_case )['''last_hidden_state''']
self.assertEqual(output.shape ,_snake_case )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] ,_snake_case ,atol=1e-3 ) )
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class __A ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
lowercase__ : List[str] = '''laion/clap-htsat-unfused'''
lowercase__ : Union[str, Any] = tempfile.mkdtemp()
def UpperCAmelCase ( self : Dict ,**_snake_case : Dict ) -> Tuple:
"""simple docstring"""
return RobertaTokenizer.from_pretrained(self.checkpoint ,**_snake_case )
def UpperCAmelCase ( self : Any ,**_snake_case : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return ClapFeatureExtractor.from_pretrained(self.checkpoint ,**_snake_case )
def UpperCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = self.get_tokenizer()
lowercase__ : List[Any] = self.get_feature_extractor()
lowercase__ : str = ClapProcessor(tokenizer=_snake_case ,feature_extractor=_snake_case )
processor.save_pretrained(self.tmpdirname )
lowercase__ : str = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer ,_snake_case )
self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor ,_snake_case )
def UpperCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
lowercase__ : Dict = ClapProcessor(tokenizer=self.get_tokenizer() ,feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
lowercase__ : List[str] = self.get_tokenizer(bos_token='''(BOS)''' ,eos_token='''(EOS)''' )
lowercase__ : Optional[int] = self.get_feature_extractor(do_normalize=_snake_case ,padding_value=1.0 )
lowercase__ : Tuple = ClapProcessor.from_pretrained(
self.tmpdirname ,bos_token='''(BOS)''' ,eos_token='''(EOS)''' ,do_normalize=_snake_case ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,_snake_case )
self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor ,_snake_case )
def UpperCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
lowercase__ : Dict = self.get_feature_extractor()
lowercase__ : List[Any] = self.get_tokenizer()
lowercase__ : Optional[Any] = ClapProcessor(tokenizer=_snake_case ,feature_extractor=_snake_case )
lowercase__ : int = floats_list((3, 1_000) )
lowercase__ : Union[str, Any] = feature_extractor(_snake_case ,return_tensors='''np''' )
lowercase__ : List[str] = processor(audios=_snake_case ,return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
def UpperCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
lowercase__ : int = self.get_feature_extractor()
lowercase__ : int = self.get_tokenizer()
lowercase__ : Dict = ClapProcessor(tokenizer=_snake_case ,feature_extractor=_snake_case )
lowercase__ : List[Any] = '''This is a test string'''
lowercase__ : List[str] = processor(text=_snake_case )
lowercase__ : Any = tokenizer(_snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Tuple = self.get_feature_extractor()
lowercase__ : str = self.get_tokenizer()
lowercase__ : List[Any] = ClapProcessor(tokenizer=_snake_case ,feature_extractor=_snake_case )
lowercase__ : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ : Optional[int] = processor.batch_decode(_snake_case )
lowercase__ : str = tokenizer.batch_decode(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
lowercase__ : int = self.get_feature_extractor()
lowercase__ : str = self.get_tokenizer()
lowercase__ : str = ClapProcessor(tokenizer=_snake_case ,feature_extractor=_snake_case )
self.assertListEqual(
processor.model_input_names[2:] ,feature_extractor.model_input_names ,msg='''`processor` and `feature_extractor` model input names do not match''' ,)
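# Hedged usage sketch (not part of the original tests): ClapProcessor simply routes text to the
# tokenizer and audio to the feature extractor, which is what the tests above verify. The
# checkpoint name comes from the setUp above; the audio array and its assumed 48 kHz rate are
# stand-ins for a real clip.
if __name__ == "__main__":
    import numpy as np
    from transformers import ClapProcessor

    processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
    audio = np.random.randn(48_000).astype("float32")  # fake one-second clip (assumed sampling rate)
    inputs = processor(text="a sound of a cat", audios=audio, return_tensors="np")
    print(sorted(inputs.keys()))  # tokenizer outputs plus the feature extractor's audio features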
"""simple docstring"""
from __future__ import annotations
lowerCAmelCase_ = '#'
class __A :
'''simple docstring'''
def __init__( self : str ) -> None:
"""simple docstring"""
lowercase__ : dict = {}
def UpperCAmelCase ( self : List[str] ,_snake_case : str ) -> None:
"""simple docstring"""
lowercase__ : str = self._trie
for char in text:
if char not in trie:
lowercase__ : Union[str, Any] = {}
lowercase__ : Optional[Any] = trie[char]
lowercase__ : Dict = True
def UpperCAmelCase ( self : Tuple ,_snake_case : str ) -> tuple | list:
"""simple docstring"""
lowercase__ : Optional[Any] = self._trie
for char in prefix:
if char in trie:
lowercase__ : Union[str, Any] = trie[char]
else:
return []
return self._elements(_snake_case )
def UpperCAmelCase ( self : List[str] ,_snake_case : dict ) -> tuple:
"""simple docstring"""
lowercase__ : str = []
for c, v in d.items():
lowercase__ : List[Any] = [''' '''] if c == END else [(c + s) for s in self._elements(_snake_case )]
result.extend(_snake_case )
return tuple(_snake_case )
lowerCAmelCase_ = Trie()
lowerCAmelCase_ = ('depart', 'detergent', 'daring', 'dog', 'deer', 'deal')
for word in words:
trie.insert_word(word)
def __UpperCAmelCase ( __lowerCamelCase ) -> tuple:
lowercase__ : List[Any] = trie.find_word(__lowerCamelCase )
return tuple(string + word for word in suffixes )
def __UpperCAmelCase ( ) -> None:
print(autocomplete_using_trie('''de''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
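# Hedged note (not part of the original file): with the word list above, `main()` prints the
# completions of the prefix "de". Assuming standard dict insertion order, the output is expected
# to be roughly:
#     ('depart ', 'detergent ', 'deer ', 'deal ')
# Each entry carries a trailing space because the end-of-word marker is rendered as ' ' in
# _elements().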
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
# TODO Update this
lowerCAmelCase_ = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : int = "esm"
def __init__( self : List[Any] ,_snake_case : int=None ,_snake_case : Optional[Any]=None ,_snake_case : Optional[int]=None ,_snake_case : List[str]=768 ,_snake_case : Optional[int]=12 ,_snake_case : Optional[int]=12 ,_snake_case : Optional[Any]=3_072 ,_snake_case : Union[str, Any]=0.1 ,_snake_case : int=0.1 ,_snake_case : Optional[Any]=1_026 ,_snake_case : Optional[int]=0.02 ,_snake_case : int=1e-12 ,_snake_case : Dict="absolute" ,_snake_case : Optional[int]=True ,_snake_case : Union[str, Any]=None ,_snake_case : Dict=False ,_snake_case : List[str]=False ,_snake_case : Optional[Any]=None ,_snake_case : Any=None ,**_snake_case : Optional[Any] ,) -> Optional[int]:
"""simple docstring"""
super().__init__(pad_token_id=_snake_case ,mask_token_id=_snake_case ,**_snake_case )
lowercase__ : Tuple = vocab_size
lowercase__ : Tuple = hidden_size
lowercase__ : Any = num_hidden_layers
lowercase__ : List[str] = num_attention_heads
lowercase__ : Any = intermediate_size
lowercase__ : int = hidden_dropout_prob
lowercase__ : int = attention_probs_dropout_prob
lowercase__ : Optional[int] = max_position_embeddings
lowercase__ : Dict = initializer_range
lowercase__ : int = layer_norm_eps
lowercase__ : int = position_embedding_type
lowercase__ : Union[str, Any] = use_cache
lowercase__ : List[Any] = emb_layer_norm_before
lowercase__ : Any = token_dropout
lowercase__ : List[str] = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('''No esmfold_config supplied for folding model, using default values.''' )
lowercase__ : Dict = EsmFoldConfig()
elif isinstance(_snake_case ,_snake_case ):
lowercase__ : Dict = EsmFoldConfig(**_snake_case )
lowercase__ : Optional[Any] = esmfold_config
if vocab_list is None:
logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
lowercase__ : Tuple = get_default_vocab_list()
else:
lowercase__ : str = vocab_list
else:
lowercase__ : str = None
lowercase__ : Union[str, Any] = None
if self.esmfold_config is not None and getattr(self.esmfold_config ,'''use_esm_attn_map''' ,_snake_case ):
raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
def UpperCAmelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Optional[Any] = super().to_dict()
if isinstance(self.esmfold_config ,_snake_case ):
lowercase__ : Optional[int] = self.esmfold_config.to_dict()
return output
@dataclass
class __A :
'''simple docstring'''
lowerCAmelCase : str = None
lowerCAmelCase : bool = True
lowerCAmelCase : bool = False
lowerCAmelCase : bool = False
lowerCAmelCase : bool = False
lowerCAmelCase : float = 0
lowerCAmelCase : bool = True
lowerCAmelCase : bool = False
lowerCAmelCase : int = 1_2_8
lowerCAmelCase : "TrunkConfig" = None
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
if self.trunk is None:
lowercase__ : int = TrunkConfig()
elif isinstance(self.trunk ,_snake_case ):
lowercase__ : Any = TrunkConfig(**self.trunk )
def UpperCAmelCase ( self : str ) -> Any:
"""simple docstring"""
lowercase__ : Tuple = asdict(self )
lowercase__ : List[str] = self.trunk.to_dict()
return output
@dataclass
class __A :
'''simple docstring'''
lowerCAmelCase : int = 4_8
lowerCAmelCase : int = 1_0_2_4
lowerCAmelCase : int = 1_2_8
lowerCAmelCase : int = 3_2
lowerCAmelCase : int = 3_2
lowerCAmelCase : int = 3_2
lowerCAmelCase : float = 0
lowerCAmelCase : float = 0
lowerCAmelCase : bool = False
lowerCAmelCase : int = 4
lowerCAmelCase : Optional[int] = 1_2_8
lowerCAmelCase : "StructureModuleConfig" = None
def UpperCAmelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
if self.structure_module is None:
lowercase__ : Tuple = StructureModuleConfig()
elif isinstance(self.structure_module ,_snake_case ):
lowercase__ : Tuple = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""" )
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(
'''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
f""" {self.sequence_state_dim} and {self.sequence_head_width}.""" )
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(
'''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
f""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" )
lowercase__ : Optional[int] = self.sequence_state_dim // self.sequence_head_width
lowercase__ : Any = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
def UpperCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
lowercase__ : List[Any] = asdict(self )
lowercase__ : Dict = self.structure_module.to_dict()
return output
@dataclass
class __A :
'''simple docstring'''
lowerCAmelCase : int = 3_8_4
lowerCAmelCase : int = 1_2_8
lowerCAmelCase : int = 1_6
lowerCAmelCase : int = 1_2_8
lowerCAmelCase : int = 1_2
lowerCAmelCase : int = 4
lowerCAmelCase : int = 8
lowerCAmelCase : float = 0.1
lowerCAmelCase : int = 8
lowerCAmelCase : int = 1
lowerCAmelCase : int = 2
lowerCAmelCase : int = 7
lowerCAmelCase : int = 1_0
lowerCAmelCase : float = 1e-8
lowerCAmelCase : float = 1e5
def UpperCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
return asdict(self )
def __UpperCAmelCase ( ) -> Any:
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
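# Hedged usage sketch (not part of the original module): the configuration above corresponds to
# transformers' EsmConfig. For a folding model, `esmfold_config` may be passed either as an
# EsmFoldConfig instance or as a plain dict (nested dicts are promoted to the dataclasses above),
# and a missing `vocab_list` falls back to the default ESM-2 vocabulary. The class and field
# names below follow the upstream transformers dataclasses and are assumptions in this dump.
if __name__ == "__main__":
    from transformers import EsmConfig

    config = EsmConfig(
        is_folding_model=True,
        esmfold_config={"trunk": {"max_recycles": 4, "dropout": 0.1}},
    )
    print(config.esmfold_config.trunk.max_recycles)  # 4
    print(len(config.vocab_list))                    # size of the default ESM-2 vocabulary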
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
lowerCAmelCase_ = logging.get_logger(__name__)
# General docstring
lowerCAmelCase_ = 'RegNetConfig'
# Base docstring
lowerCAmelCase_ = 'facebook/regnet-y-040'
lowerCAmelCase_ = [1, 1_088, 7, 7]
# Image classification docstring
lowerCAmelCase_ = 'facebook/regnet-y-040'
lowerCAmelCase_ = 'tabby, tabby cat'
lowerCAmelCase_ = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : int ,_snake_case : int ,_snake_case : int ,_snake_case : int = 3 ,_snake_case : int = 1 ,_snake_case : int = 1 ,_snake_case : Optional[str] = "relu" ,) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
lowercase__ : Tuple = nn.Convad(
_snake_case ,_snake_case ,kernel_size=_snake_case ,stride=_snake_case ,padding=kernel_size // 2 ,groups=_snake_case ,bias=_snake_case ,)
lowercase__ : List[Any] = nn.BatchNormad(_snake_case )
lowercase__ : Optional[int] = ACTaFN[activation] if activation is not None else nn.Identity()
def UpperCAmelCase ( self : List[str] ,_snake_case : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = self.convolution(_snake_case )
lowercase__ : Tuple = self.normalization(_snake_case )
lowercase__ : Tuple = self.activation(_snake_case )
return hidden_state
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] ,_snake_case : RegNetConfig ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
lowercase__ : List[Any] = RegNetConvLayer(
config.num_channels ,config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act )
lowercase__ : str = config.num_channels
def UpperCAmelCase ( self : int ,_snake_case : Dict ) -> str:
"""simple docstring"""
lowercase__ : Union[str, Any] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
lowercase__ : Optional[int] = self.embedder(_snake_case )
return hidden_state
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : str ,_snake_case : int ,_snake_case : int ,_snake_case : int = 2 ) -> Any:
"""simple docstring"""
super().__init__()
lowercase__ : List[str] = nn.Convad(_snake_case ,_snake_case ,kernel_size=1 ,stride=_snake_case ,bias=_snake_case )
lowercase__ : Any = nn.BatchNormad(_snake_case )
def UpperCAmelCase ( self : List[str] ,_snake_case : Tensor ) -> Tensor:
"""simple docstring"""
lowercase__ : Union[str, Any] = self.convolution(_snake_case )
lowercase__ : Optional[int] = self.normalization(_snake_case )
return hidden_state
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple ,_snake_case : int ,_snake_case : int ) -> Dict:
"""simple docstring"""
super().__init__()
lowercase__ : Any = nn.AdaptiveAvgPoolad((1, 1) )
lowercase__ : Dict = nn.Sequential(
nn.Convad(_snake_case ,_snake_case ,kernel_size=1 ) ,nn.ReLU() ,nn.Convad(_snake_case ,_snake_case ,kernel_size=1 ) ,nn.Sigmoid() ,)
def UpperCAmelCase ( self : int ,_snake_case : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : List[str] = self.pooler(_snake_case )
lowercase__ : Union[str, Any] = self.attention(_snake_case )
lowercase__ : List[str] = hidden_state * attention
return hidden_state
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] ,_snake_case : RegNetConfig ,_snake_case : int ,_snake_case : int ,_snake_case : int = 1 ) -> List[str]:
"""simple docstring"""
super().__init__()
lowercase__ : Tuple = in_channels != out_channels or stride != 1
lowercase__ : Optional[int] = max(1 ,out_channels // config.groups_width )
lowercase__ : str = (
RegNetShortCut(_snake_case ,_snake_case ,stride=_snake_case ) if should_apply_shortcut else nn.Identity()
)
lowercase__ : Optional[int] = nn.Sequential(
RegNetConvLayer(_snake_case ,_snake_case ,kernel_size=1 ,activation=config.hidden_act ) ,RegNetConvLayer(_snake_case ,_snake_case ,stride=_snake_case ,groups=_snake_case ,activation=config.hidden_act ) ,RegNetConvLayer(_snake_case ,_snake_case ,kernel_size=1 ,activation=_snake_case ) ,)
lowercase__ : str = ACTaFN[config.hidden_act]
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : List[Any] ) -> List[str]:
"""simple docstring"""
lowercase__ : Tuple = hidden_state
lowercase__ : Union[str, Any] = self.layer(_snake_case )
lowercase__ : List[Any] = self.shortcut(_snake_case )
hidden_state += residual
lowercase__ : Optional[int] = self.activation(_snake_case )
return hidden_state
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple ,_snake_case : RegNetConfig ,_snake_case : int ,_snake_case : int ,_snake_case : int = 1 ) -> Optional[int]:
"""simple docstring"""
super().__init__()
lowercase__ : List[Any] = in_channels != out_channels or stride != 1
lowercase__ : List[str] = max(1 ,out_channels // config.groups_width )
lowercase__ : Tuple = (
RegNetShortCut(_snake_case ,_snake_case ,stride=_snake_case ) if should_apply_shortcut else nn.Identity()
)
lowercase__ : str = nn.Sequential(
RegNetConvLayer(_snake_case ,_snake_case ,kernel_size=1 ,activation=config.hidden_act ) ,RegNetConvLayer(_snake_case ,_snake_case ,stride=_snake_case ,groups=_snake_case ,activation=config.hidden_act ) ,RegNetSELayer(_snake_case ,reduced_channels=int(round(in_channels / 4 ) ) ) ,RegNetConvLayer(_snake_case ,_snake_case ,kernel_size=1 ,activation=_snake_case ) ,)
lowercase__ : Optional[Any] = ACTaFN[config.hidden_act]
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ : str = hidden_state
lowercase__ : Optional[Any] = self.layer(_snake_case )
lowercase__ : int = self.shortcut(_snake_case )
hidden_state += residual
lowercase__ : str = self.activation(_snake_case )
return hidden_state
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,_snake_case : RegNetConfig ,_snake_case : int ,_snake_case : int ,_snake_case : int = 2 ,_snake_case : int = 2 ,) -> Dict:
"""simple docstring"""
super().__init__()
lowercase__ : Optional[Any] = RegNetXLayer if config.layer_type == '''x''' else RegNetYLayer
lowercase__ : Optional[Any] = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
_snake_case ,_snake_case ,_snake_case ,stride=_snake_case ,) ,*[layer(_snake_case ,_snake_case ,_snake_case ) for _ in range(depth - 1 )] ,)
def UpperCAmelCase ( self : Tuple ,_snake_case : int ) -> List[Any]:
"""simple docstring"""
lowercase__ : List[str] = self.layers(_snake_case )
return hidden_state
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict ,_snake_case : RegNetConfig ) -> List[Any]:
"""simple docstring"""
super().__init__()
lowercase__ : str = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
_snake_case ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,) )
lowercase__ : str = zip(config.hidden_sizes ,config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(_snake_case ,config.depths[1:] ):
self.stages.append(RegNetStage(_snake_case ,_snake_case ,_snake_case ,depth=_snake_case ) )
def UpperCAmelCase ( self : List[str] ,_snake_case : Tensor ,_snake_case : bool = False ,_snake_case : bool = True ) -> BaseModelOutputWithNoAttention:
"""simple docstring"""
lowercase__ : Dict = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase__ : int = hidden_states + (hidden_state,)
lowercase__ : Any = stage_module(_snake_case )
if output_hidden_states:
lowercase__ : Optional[int] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=_snake_case ,hidden_states=_snake_case )
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : int = RegNetConfig
lowerCAmelCase : List[Any] = "regnet"
lowerCAmelCase : Optional[int] = "pixel_values"
lowerCAmelCase : Union[str, Any] = True
def UpperCAmelCase ( self : Any ,_snake_case : Tuple ) -> List[Any]:
"""simple docstring"""
if isinstance(_snake_case ,nn.Convad ):
nn.init.kaiming_normal_(module.weight ,mode='''fan_out''' ,nonlinearity='''relu''' )
elif isinstance(_snake_case ,(nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight ,1 )
nn.init.constant_(module.bias ,0 )
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Dict ,_snake_case : Any=False ) -> Optional[int]:
"""simple docstring"""
if isinstance(_snake_case ,_snake_case ):
lowercase__ : str = value
lowerCAmelCase_ = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
lowerCAmelCase_ = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." ,A_ ,)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class __A ( A_ ):
'''simple docstring'''
def __init__( self : Optional[Any] ,_snake_case : Any ) -> Tuple:
"""simple docstring"""
super().__init__(_snake_case )
lowercase__ : Any = config
lowercase__ : List[str] = RegNetEmbeddings(_snake_case )
lowercase__ : Any = RegNetEncoder(_snake_case )
lowercase__ : Dict = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_snake_case )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC ,output_type=_snake_case ,config_class=_CONFIG_FOR_DOC ,modality='''vision''' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
def UpperCAmelCase ( self : Dict ,_snake_case : Tensor ,_snake_case : Optional[bool] = None ,_snake_case : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention:
"""simple docstring"""
lowercase__ : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__ : Dict = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ : Union[str, Any] = self.embedder(_snake_case )
lowercase__ : List[Any] = self.encoder(
_snake_case ,output_hidden_states=_snake_case ,return_dict=_snake_case )
lowercase__ : str = encoder_outputs[0]
lowercase__ : Optional[int] = self.pooler(_snake_case )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_snake_case ,pooler_output=_snake_case ,hidden_states=encoder_outputs.hidden_states ,)
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " ,A_ ,)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class __A ( A_ ):
'''simple docstring'''
def __init__( self : int ,_snake_case : Tuple ) -> Any:
"""simple docstring"""
super().__init__(_snake_case )
lowercase__ : Optional[Any] = config.num_labels
lowercase__ : int = RegNetModel(_snake_case )
# classification head
lowercase__ : str = nn.Sequential(
nn.Flatten() ,nn.Linear(config.hidden_sizes[-1] ,config.num_labels ) if config.num_labels > 0 else nn.Identity() ,)
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_snake_case )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=_snake_case ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
def UpperCAmelCase ( self : List[Any] ,_snake_case : Optional[torch.FloatTensor] = None ,_snake_case : Optional[torch.LongTensor] = None ,_snake_case : Optional[bool] = None ,_snake_case : Optional[bool] = None ,) -> ImageClassifierOutputWithNoAttention:
"""simple docstring"""
lowercase__ : Any = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ : List[Any] = self.regnet(_snake_case ,output_hidden_states=_snake_case ,return_dict=_snake_case )
lowercase__ : List[str] = outputs.pooler_output if return_dict else outputs[1]
lowercase__ : Union[str, Any] = self.classifier(_snake_case )
lowercase__ : Optional[int] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowercase__ : List[Any] = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowercase__ : Dict = '''single_label_classification'''
else:
lowercase__ : Optional[int] = '''multi_label_classification'''
if self.config.problem_type == "regression":
lowercase__ : Union[str, Any] = MSELoss()
if self.num_labels == 1:
lowercase__ : List[Any] = loss_fct(logits.squeeze() ,labels.squeeze() )
else:
lowercase__ : Tuple = loss_fct(_snake_case ,_snake_case )
elif self.config.problem_type == "single_label_classification":
lowercase__ : Tuple = CrossEntropyLoss()
lowercase__ : str = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowercase__ : Any = BCEWithLogitsLoss()
lowercase__ : Union[str, Any] = loss_fct(_snake_case ,_snake_case )
if not return_dict:
lowercase__ : Tuple = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_snake_case ,logits=_snake_case ,hidden_states=outputs.hidden_states )
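# Hedged usage sketch (not part of the original module): running the classification model defined
# above on a dummy batch. The checkpoint name is the one used in this file's docstring constants;
# the random tensor only mimics the expected (batch, channels, height, width) layout.
if __name__ == "__main__":
    import torch
    from transformers import RegNetForImageClassification

    model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    pixel_values = torch.randn(1, 3, 224, 224)
    with torch.no_grad():
        logits = model(pixel_values).logits
    print(logits.shape)  # (1, num_labels); the public checkpoint is trained on ImageNet-1k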
"""simple docstring"""
import re
import subprocess
import sys
lowerCAmelCase_ = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
lowerCAmelCase_ = (
subprocess.check_output(F'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode('utf-8').split()
)
lowerCAmelCase_ = '|'.join(sys.argv[1:])
lowerCAmelCase_ = re.compile(RF'''^({joined_dirs}).*?\.py$''')
lowerCAmelCase_ = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
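# Hedged usage note (not part of the original script): the directories to watch are passed as
# command-line arguments and joined into a single regex, so an invocation such as
#
#     python <this_script>.py src/transformers tests
#
# prints the space-separated list of modified *.py files under src/transformers/ or tests/
# relative to the merge-base of HEAD and main. The script's file name is not given in this dump.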
"""simple docstring"""
from __future__ import annotations
lowerCAmelCase_ = 1.6021E-19 # units = C
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
raise ValueError('''mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
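# Hedged illustration (not part of the original file): exactly one of the three inputs must be
# zero, and the function returns the name and value of that missing quantity. The call below is
# illustrative; the function name is assumed from the module's subject matter.
#     carrier_concentration(conductivity=25, electron_conc=100, mobility=0)
#     # -> ("mobility", 25 / (100 * 1.6021e-19)) which is approximately ("mobility", 1.56e18)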
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {}
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : List[str] = "llama"
lowerCAmelCase : Tuple = ["past_key_values"]
def __init__( self : Any ,_snake_case : List[Any]=32_000 ,_snake_case : str=4_096 ,_snake_case : Dict=11_008 ,_snake_case : int=32 ,_snake_case : Tuple=32 ,_snake_case : Optional[int]=None ,_snake_case : List[Any]="silu" ,_snake_case : Union[str, Any]=2_048 ,_snake_case : List[str]=0.02 ,_snake_case : List[Any]=1e-6 ,_snake_case : Optional[Any]=True ,_snake_case : Any=0 ,_snake_case : Optional[int]=1 ,_snake_case : List[str]=2 ,_snake_case : Tuple=1 ,_snake_case : str=False ,_snake_case : Tuple=None ,**_snake_case : Tuple ,) -> Tuple:
"""simple docstring"""
lowercase__ : Tuple = vocab_size
lowercase__ : Optional[Any] = max_position_embeddings
lowercase__ : Any = hidden_size
lowercase__ : Dict = intermediate_size
lowercase__ : List[Any] = num_hidden_layers
lowercase__ : List[str] = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
lowercase__ : Union[str, Any] = num_attention_heads
lowercase__ : Any = num_key_value_heads
lowercase__ : Union[str, Any] = hidden_act
lowercase__ : Any = initializer_range
lowercase__ : str = rms_norm_eps
lowercase__ : List[Any] = pretraining_tp
lowercase__ : Any = use_cache
lowercase__ : Optional[int] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_snake_case ,bos_token_id=_snake_case ,eos_token_id=_snake_case ,tie_word_embeddings=_snake_case ,**_snake_case ,)
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling ,_snake_case ) or len(self.rope_scaling ) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
f"""got {self.rope_scaling}""" )
lowercase__ : List[Any] = self.rope_scaling.get('''type''' ,_snake_case )
lowercase__ : Optional[Any] = self.rope_scaling.get('''factor''' ,_snake_case )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(_snake_case ,_snake_case ) or rope_scaling_factor <= 1.0:
raise ValueError(f"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
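# Hedged usage sketch (not part of the original module): per the validation above, `rope_scaling`
# must be a dict with exactly the keys `type` (one of "linear" / "dynamic") and `factor`
# (a float strictly greater than 1.0). The class name is taken from the upstream transformers library.
if __name__ == "__main__":
    from transformers import LlamaConfig

    config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # accepted
    print(config.rope_scaling)
    try:
        LlamaConfig(rope_scaling={"type": "linear", "factor": 1.0})       # rejected: factor must be > 1.0
    except ValueError as err:
        print(err)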
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCAmelCase_ = logging.get_logger(__name__)
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : str = ["pixel_values"]
def __init__( self : Tuple ,_snake_case : bool = True ,_snake_case : Optional[Dict[str, int]] = None ,_snake_case : PILImageResampling = PILImageResampling.BICUBIC ,_snake_case : bool = True ,_snake_case : bool = True ,_snake_case : Union[int, float] = 1 / 255 ,_snake_case : Dict[str, int] = None ,_snake_case : bool = True ,_snake_case : Optional[Union[float, List[float]]] = None ,_snake_case : Optional[Union[float, List[float]]] = None ,**_snake_case : Optional[Any] ,) -> None:
"""simple docstring"""
super().__init__(**_snake_case )
lowercase__ : str = size if size is not None else {'''height''': 224, '''width''': 224}
lowercase__ : Optional[int] = get_size_dict(_snake_case )
lowercase__ : List[Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
lowercase__ : Optional[int] = get_size_dict(_snake_case ,default_to_square=_snake_case ,param_name='''crop_size''' )
lowercase__ : Tuple = do_resize
lowercase__ : List[Any] = do_rescale
lowercase__ : Any = do_normalize
lowercase__ : List[str] = do_center_crop
lowercase__ : Optional[Any] = crop_size
lowercase__ : Union[str, Any] = size
lowercase__ : Any = resample
lowercase__ : int = rescale_factor
lowercase__ : Tuple = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
lowercase__ : str = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCAmelCase ( self : str ,_snake_case : np.ndarray ,_snake_case : Dict[str, int] ,_snake_case : PILImageResampling = PILImageResampling.BILINEAR ,_snake_case : Optional[Union[str, ChannelDimension]] = None ,**_snake_case : Dict ,) -> np.ndarray:
"""simple docstring"""
lowercase__ : List[str] = get_size_dict(_snake_case )
if "shortest_edge" in size:
lowercase__ : str = get_resize_output_image_size(_snake_case ,size=size['''shortest_edge'''] ,default_to_square=_snake_case )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
lowercase__ : int = (size['''height'''], size['''width'''])
else:
raise ValueError(f"""Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}""" )
return resize(_snake_case ,size=_snake_case ,resample=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCAmelCase ( self : List[Any] ,_snake_case : np.ndarray ,_snake_case : Dict[str, int] ,_snake_case : Optional[Union[str, ChannelDimension]] = None ,**_snake_case : Tuple ,) -> np.ndarray:
"""simple docstring"""
lowercase__ : Optional[Any] = get_size_dict(_snake_case )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(_snake_case ,size=(size['''height'''], size['''width''']) ,data_format=_snake_case ,**_snake_case )
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : np.ndarray ,_snake_case : float ,_snake_case : Optional[Union[str, ChannelDimension]] = None ,**_snake_case : Optional[int] ) -> np.ndarray:
"""simple docstring"""
return rescale(_snake_case ,scale=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCAmelCase ( self : Dict ,_snake_case : np.ndarray ,_snake_case : Union[float, List[float]] ,_snake_case : Union[float, List[float]] ,_snake_case : Optional[Union[str, ChannelDimension]] = None ,**_snake_case : Dict ,) -> np.ndarray:
"""simple docstring"""
return normalize(_snake_case ,mean=_snake_case ,std=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : ImageInput ,_snake_case : Optional[bool] = None ,_snake_case : Dict[str, int] = None ,_snake_case : PILImageResampling = None ,_snake_case : bool = None ,_snake_case : int = None ,_snake_case : Optional[bool] = None ,_snake_case : Optional[float] = None ,_snake_case : Optional[bool] = None ,_snake_case : Optional[Union[float, List[float]]] = None ,_snake_case : Optional[Union[float, List[float]]] = None ,_snake_case : Optional[Union[str, TensorType]] = None ,_snake_case : Union[str, ChannelDimension] = ChannelDimension.FIRST ,**_snake_case : List[str] ,) -> BatchFeature:
"""simple docstring"""
lowercase__ : Optional[int] = do_resize if do_resize is not None else self.do_resize
lowercase__ : int = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ : int = do_normalize if do_normalize is not None else self.do_normalize
lowercase__ : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase__ : Optional[Any] = crop_size if crop_size is not None else self.crop_size
lowercase__ : Tuple = get_size_dict(_snake_case ,param_name='''crop_size''' ,default_to_square=_snake_case )
lowercase__ : Tuple = resample if resample is not None else self.resample
lowercase__ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
lowercase__ : List[str] = image_std if image_std is not None else self.image_std
lowercase__ : Optional[int] = size if size is not None else self.size
lowercase__ : int = get_size_dict(_snake_case )
if not is_batched(_snake_case ):
lowercase__ : Optional[Any] = [images]
if not valid_images(_snake_case ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
lowercase__ : str = [to_numpy_array(_snake_case ) for image in images]
if do_resize:
lowercase__ : int = [self.resize(image=_snake_case ,size=_snake_case ,resample=_snake_case ) for image in images]
if do_center_crop:
lowercase__ : str = [self.center_crop(image=_snake_case ,size=_snake_case ) for image in images]
if do_rescale:
lowercase__ : Optional[Any] = [self.rescale(image=_snake_case ,scale=_snake_case ) for image in images]
if do_normalize:
lowercase__ : List[str] = [self.normalize(image=_snake_case ,mean=_snake_case ,std=_snake_case ) for image in images]
lowercase__ : Union[str, Any] = [to_channel_dimension_format(_snake_case ,_snake_case ) for image in images]
lowercase__ : Any = {'''pixel_values''': images}
return BatchFeature(data=_snake_case ,tensor_type=_snake_case )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase_ = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['YolosFeatureExtractor']
lowerCAmelCase_ = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
'YolosForObjectDetection',
'YolosModel',
'YolosPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase_ = {
'configuration_efficientnet': [
'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientNetConfig',
'EfficientNetOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['EfficientNetImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientNetForImageClassification',
'EfficientNetModel',
'EfficientNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
from collections import namedtuple
lowerCAmelCase_ = namedtuple('from_to', 'from_ to')
lowerCAmelCase_ = {
'cubicmeter': from_to(1, 1),
'litre': from_to(0.0_0_1, 1_000),
'kilolitre': from_to(1, 1),
'gallon': from_to(0.0_0_4_5_4, 264.172),
'cubicyard': from_to(0.7_6_4_5_5, 1.3_0_7_9_5),
'cubicfoot': from_to(0.0_2_8, 35.3_147),
'cup': from_to(0.0_0_0_2_3_6_5_8_8, 4_226.75),
}
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> float:
if from_type not in METRIC_CONVERSION:
raise ValueError(
f"""Invalid 'from_type' value: {from_type!r} Supported values are:\n"""
+ ''', '''.join(__lowerCamelCase ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
f"""Invalid 'to_type' value: {to_type!r}. Supported values are:\n"""
+ ''', '''.join(__lowerCamelCase ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
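# Hedged illustration (not part of the original file): every conversion goes through cubic metres,
# i.e. value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to. Assuming the
# obfuscated function corresponds to `volume_conversion(value, from_type, to_type)`:
#     volume_conversion(1_000, "litre", "gallon")   # 1000 * 0.001 * 264.172 = 264.172
#     volume_conversion(4, "cubicmeter", "litre")   # 4 * 1 * 1000 = 4000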
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase = "cpu" , __lowerCamelCase = None ) -> None:
lowercase__ : List[str] = torch.load(__lowerCamelCase , map_location=__lowerCamelCase )
for k, v in tqdm(state_dict.items() ):
if not isinstance(__lowerCamelCase , torch.Tensor ):
raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''' )
lowercase__ : List[Any] = v.half()
if save_path is None: # overwrite src_path
lowercase__ : Any = src_path
torch.save(__lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
fire.Fire(convert)
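# Hedged usage note (not part of the original script): `fire.Fire(convert)` exposes the function
# above as a CLI taking src_path, map_location and save_path, so an invocation such as (the script
# file name is assumed, it is not given in this dump):
#
#     python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model.fp16.bin
#
# loads the state dict, casts every tensor to half precision and writes it back out; when
# --save_path is omitted, the source file is overwritten in place.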
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : UNetaDModel
lowerCAmelCase : ScoreSdeVeScheduler
def __init__( self : Optional[Any] ,_snake_case : UNetaDModel ,_snake_case : ScoreSdeVeScheduler ) -> str:
"""simple docstring"""
super().__init__()
self.register_modules(unet=_snake_case ,scheduler=_snake_case )
@torch.no_grad()
def __call__( self : Any ,_snake_case : int = 1 ,_snake_case : int = 2_000 ,_snake_case : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,_snake_case : Optional[str] = "pil" ,_snake_case : bool = True ,**_snake_case : Any ,) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
lowercase__ : Optional[Any] = self.unet.config.sample_size
lowercase__ : Dict = (batch_size, 3, img_size, img_size)
lowercase__ : Tuple = self.unet
lowercase__ : Any = randn_tensor(_snake_case ,generator=_snake_case ) * self.scheduler.init_noise_sigma
lowercase__ : Union[str, Any] = sample.to(self.device )
self.scheduler.set_timesteps(_snake_case )
self.scheduler.set_sigmas(_snake_case )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowercase__ : Tuple = self.scheduler.sigmas[i] * torch.ones(shape[0] ,device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
lowercase__ : List[str] = self.unet(_snake_case ,_snake_case ).sample
lowercase__ : Optional[Any] = self.scheduler.step_correct(_snake_case ,_snake_case ,generator=_snake_case ).prev_sample
# prediction step
lowercase__ : str = model(_snake_case ,_snake_case ).sample
lowercase__ : List[Any] = self.scheduler.step_pred(_snake_case ,_snake_case ,_snake_case ,generator=_snake_case )
sample, sample_mean = output.prev_sample, output.prev_sample_mean
lowercase__ : Union[str, Any] = sample_mean.clamp(0 ,1 )
lowercase__ : int = sample.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
lowercase__ : Any = self.numpy_to_pil(_snake_case )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=_snake_case )
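# Hedged usage sketch (added for illustration, not part of the original file): how a
# pipeline like the one above is typically loaded and called. The public class name
# and the checkpoint id are assumptions, and the call is left commented out because
# it downloads weights and runs a 2_000-step sampling loop.
#   from diffusers import ScoreSdeVePipeline
#   sde_ve = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-celebahq-256")  # assumed checkpoint
#   image = sde_ve(num_inference_steps=2_000).images[0]
#   image.save("sde_ve_sample.png")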
| 367
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : UNetaDModel
lowerCAmelCase : ScoreSdeVeScheduler
def __init__( self : Optional[Any] ,_snake_case : UNetaDModel ,_snake_case : ScoreSdeVeScheduler ) -> str:
"""simple docstring"""
super().__init__()
self.register_modules(unet=_snake_case ,scheduler=_snake_case )
@torch.no_grad()
def __call__( self : Any ,_snake_case : int = 1 ,_snake_case : int = 2_000 ,_snake_case : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,_snake_case : Optional[str] = "pil" ,_snake_case : bool = True ,**_snake_case : Any ,) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
lowercase__ : Optional[Any] = self.unet.config.sample_size
lowercase__ : Dict = (batch_size, 3, img_size, img_size)
lowercase__ : Tuple = self.unet
lowercase__ : Any = randn_tensor(_snake_case ,generator=_snake_case ) * self.scheduler.init_noise_sigma
lowercase__ : Union[str, Any] = sample.to(self.device )
self.scheduler.set_timesteps(_snake_case )
self.scheduler.set_sigmas(_snake_case )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowercase__ : Tuple = self.scheduler.sigmas[i] * torch.ones(shape[0] ,device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
lowercase__ : List[str] = self.unet(_snake_case ,_snake_case ).sample
lowercase__ : Optional[Any] = self.scheduler.step_correct(_snake_case ,_snake_case ,generator=_snake_case ).prev_sample
# prediction step
lowercase__ : str = model(_snake_case ,_snake_case ).sample
lowercase__ : List[Any] = self.scheduler.step_pred(_snake_case ,_snake_case ,_snake_case ,generator=_snake_case )
            lowercase__ , lowercase__ = output.prev_sample, output.prev_sample_mean
lowercase__ : Union[str, Any] = sample_mean.clamp(0 ,1 )
lowercase__ : int = sample.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
lowercase__ : Any = self.numpy_to_pil(_snake_case )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=_snake_case )
| 302
| 0
|
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase__ ( A_ ):
'''simple docstring'''
lowerCAmelCase : List[Any] = (IPNDMScheduler,)
lowerCAmelCase : Optional[int] = (("num_inference_steps", 5_0),)
def UpperCAmelCase ( self : Optional[Any] ,**_snake_case : str ) -> int:
"""simple docstring"""
lowercase__ : Optional[Any] = {'''num_train_timesteps''': 1_000}
config.update(**_snake_case )
return config
def UpperCAmelCase ( self : List[Any] ,_snake_case : int=0 ,**_snake_case : str ) -> Tuple:
"""simple docstring"""
lowercase__ : Dict = dict(self.forward_default_kwargs )
lowercase__ : Union[str, Any] = kwargs.pop('''num_inference_steps''' ,_snake_case )
lowercase__ : List[Any] = self.dummy_sample
lowercase__ : Any = 0.1 * sample
lowercase__ : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowercase__ : Optional[Any] = self.get_scheduler_config(**_snake_case )
lowercase__ : List[str] = scheduler_class(**_snake_case )
scheduler.set_timesteps(_snake_case )
# copy over dummy past residuals
lowercase__ : Dict = dummy_past_residuals[:]
if time_step is None:
lowercase__ : Union[str, Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_snake_case )
lowercase__ : Tuple = scheduler_class.from_pretrained(_snake_case )
new_scheduler.set_timesteps(_snake_case )
# copy over dummy past residuals
lowercase__ : Optional[Any] = dummy_past_residuals[:]
lowercase__ : Optional[int] = scheduler.step(_snake_case ,_snake_case ,_snake_case ,**_snake_case ).prev_sample
lowercase__ : int = new_scheduler.step(_snake_case ,_snake_case ,_snake_case ,**_snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
lowercase__ : Optional[int] = scheduler.step(_snake_case ,_snake_case ,_snake_case ,**_snake_case ).prev_sample
lowercase__ : List[Any] = new_scheduler.step(_snake_case ,_snake_case ,_snake_case ,**_snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
pass
def UpperCAmelCase ( self : List[Any] ,_snake_case : Union[str, Any]=0 ,**_snake_case : Any ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : str = dict(self.forward_default_kwargs )
lowercase__ : Optional[int] = kwargs.pop('''num_inference_steps''' ,_snake_case )
lowercase__ : Dict = self.dummy_sample
lowercase__ : Any = 0.1 * sample
lowercase__ : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowercase__ : Union[str, Any] = self.get_scheduler_config()
lowercase__ : int = scheduler_class(**_snake_case )
scheduler.set_timesteps(_snake_case )
# copy over dummy past residuals (must be after setting timesteps)
lowercase__ : Any = dummy_past_residuals[:]
if time_step is None:
lowercase__ : Any = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_snake_case )
lowercase__ : Union[str, Any] = scheduler_class.from_pretrained(_snake_case )
# copy over dummy past residuals
new_scheduler.set_timesteps(_snake_case )
# copy over dummy past residual (must be after setting timesteps)
lowercase__ : Optional[Any] = dummy_past_residuals[:]
lowercase__ : Dict = scheduler.step(_snake_case ,_snake_case ,_snake_case ,**_snake_case ).prev_sample
lowercase__ : Optional[int] = new_scheduler.step(_snake_case ,_snake_case ,_snake_case ,**_snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
lowercase__ : Union[str, Any] = scheduler.step(_snake_case ,_snake_case ,_snake_case ,**_snake_case ).prev_sample
lowercase__ : List[str] = new_scheduler.step(_snake_case ,_snake_case ,_snake_case ,**_snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase ( self : int ,**_snake_case : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : str = self.scheduler_classes[0]
lowercase__ : Optional[int] = self.get_scheduler_config(**_snake_case )
lowercase__ : Any = scheduler_class(**_snake_case )
lowercase__ : str = 10
lowercase__ : Any = self.dummy_model()
lowercase__ : Dict = self.dummy_sample_deter
scheduler.set_timesteps(_snake_case )
for i, t in enumerate(scheduler.timesteps ):
lowercase__ : Union[str, Any] = model(_snake_case ,_snake_case )
lowercase__ : int = scheduler.step(_snake_case ,_snake_case ,_snake_case ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
lowercase__ : List[Any] = model(_snake_case ,_snake_case )
lowercase__ : Tuple = scheduler.step(_snake_case ,_snake_case ,_snake_case ).prev_sample
return sample
def UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
lowercase__ : Dict = dict(self.forward_default_kwargs )
lowercase__ : int = kwargs.pop('''num_inference_steps''' ,_snake_case )
for scheduler_class in self.scheduler_classes:
lowercase__ : Optional[Any] = self.get_scheduler_config()
lowercase__ : Tuple = scheduler_class(**_snake_case )
lowercase__ : List[Any] = self.dummy_sample
lowercase__ : Tuple = 0.1 * sample
if num_inference_steps is not None and hasattr(_snake_case ,'''set_timesteps''' ):
scheduler.set_timesteps(_snake_case )
elif num_inference_steps is not None and not hasattr(_snake_case ,'''set_timesteps''' ):
lowercase__ : Any = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowercase__ : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
lowercase__ : str = dummy_past_residuals[:]
lowercase__ : Optional[int] = scheduler.timesteps[5]
lowercase__ : Union[str, Any] = scheduler.timesteps[6]
lowercase__ : Optional[Any] = scheduler.step(_snake_case ,_snake_case ,_snake_case ,**_snake_case ).prev_sample
lowercase__ : Tuple = scheduler.step(_snake_case ,_snake_case ,_snake_case ,**_snake_case ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
lowercase__ : str = scheduler.step(_snake_case ,_snake_case ,_snake_case ,**_snake_case ).prev_sample
lowercase__ : Tuple = scheduler.step(_snake_case ,_snake_case ,_snake_case ,**_snake_case ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=_snake_case ,time_step=_snake_case )
def UpperCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
for t, num_inference_steps in zip([1, 5, 10] ,[10, 50, 100] ):
self.check_over_forward(num_inference_steps=_snake_case ,time_step=_snake_case )
def UpperCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : List[str] = self.full_loop()
lowercase__ : Any = torch.mean(torch.abs(_snake_case ) )
assert abs(result_mean.item() - 2_540_529 ) < 10
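# Hedged usage sketch (added for illustration, not part of the original tests): the
# bare scheduler loop that the full-loop helper above exercises, using a stand-in
# "model" (a scaled copy of the sample) so no network weights are required.
_demo_scheduler = IPNDMScheduler(num_train_timesteps=1_000)
_demo_scheduler.set_timesteps(10)
_demo_sample = torch.randn(1, 3, 8, 8)
for _demo_t in _demo_scheduler.timesteps:
    _demo_residual = 0.1 * _demo_sample  # stand-in for a real denoising model
    _demo_sample = _demo_scheduler.step(_demo_residual, _demo_t, _demo_sample).prev_sample
print(_demo_sample.shape)  # torch.Size([1, 3, 8, 8])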
| 368
|
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
lowerCAmelCase_ = {
'facebook/maskformer-swin-base-ade': (
'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
lowerCAmelCase_ = logging.get_logger(__name__)
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = "maskformer"
lowerCAmelCase : Any = {"hidden_size": "mask_feature_size"}
lowerCAmelCase : Optional[int] = ["resnet", "swin"]
lowerCAmelCase : str = ["detr"]
def __init__( self : int ,_snake_case : int = 256 ,_snake_case : int = 256 ,_snake_case : float = 0.1 ,_snake_case : bool = False ,_snake_case : Optional[Dict] = None ,_snake_case : Optional[Dict] = None ,_snake_case : float = 0.02 ,_snake_case : float = 1.0 ,_snake_case : float = 1.0 ,_snake_case : float = 1.0 ,_snake_case : float = 20.0 ,_snake_case : Optional[bool] = None ,**_snake_case : Optional[Any] ,) -> Dict:
"""simple docstring"""
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
lowercase__ : Any = SwinConfig(
image_size=384 ,in_channels=3 ,patch_size=4 ,embed_dim=128 ,depths=[2, 2, 18, 2] ,num_heads=[4, 8, 16, 32] ,window_size=12 ,drop_path_rate=0.3 ,out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] ,)
if isinstance(_snake_case ,_snake_case ):
lowercase__ : List[str] = backbone_config.pop('''model_type''' )
lowercase__ : List[Any] = CONFIG_MAPPING[backbone_model_type]
lowercase__ : str = config_class.from_dict(_snake_case )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. """
f"""Supported model types: {",".join(self.backbones_supported )}""" )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
lowercase__ : Union[str, Any] = DetrConfig()
else:
# verify that the decoder is supported
lowercase__ : Tuple = (
decoder_config.pop('''model_type''' ) if isinstance(_snake_case ,_snake_case ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f"""Transformer Decoder {decoder_type} not supported, please use one of"""
f""" {",".join(self.decoders_supported )}""" )
if isinstance(_snake_case ,_snake_case ):
lowercase__ : Optional[int] = CONFIG_MAPPING[decoder_type]
lowercase__ : Optional[Any] = config_class.from_dict(_snake_case )
lowercase__ : List[Any] = backbone_config
lowercase__ : List[Any] = decoder_config
# main feature dimension for the model
lowercase__ : List[str] = fpn_feature_size
lowercase__ : int = mask_feature_size
# initializer
lowercase__ : str = init_std
lowercase__ : str = init_xavier_std
# Hungarian matcher && loss
lowercase__ : Optional[int] = cross_entropy_weight
lowercase__ : List[Any] = dice_weight
lowercase__ : List[str] = mask_weight
lowercase__ : str = use_auxiliary_loss
lowercase__ : Optional[int] = no_object_weight
lowercase__ : Optional[Any] = output_auxiliary_logits
lowercase__ : Optional[Any] = self.decoder_config.encoder_attention_heads
lowercase__ : Optional[Any] = self.decoder_config.num_hidden_layers
super().__init__(**_snake_case )
@classmethod
def UpperCAmelCase ( cls : Any ,_snake_case : PretrainedConfig ,_snake_case : PretrainedConfig ,**_snake_case : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return cls(
backbone_config=_snake_case ,decoder_config=_snake_case ,**_snake_case ,)
def UpperCAmelCase ( self : str ) -> Dict[str, any]:
"""simple docstring"""
lowercase__ : Optional[Any] = copy.deepcopy(self.__dict__ )
lowercase__ : int = self.backbone_config.to_dict()
lowercase__ : List[Any] = self.decoder_config.to_dict()
lowercase__ : List[str] = self.__class__.model_type
return output
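# Hedged usage sketch (added for illustration, not part of the original file): building
# the config from explicit backbone / decoder configs, mirroring the classmethod defined
# above. The public names used below (MaskFormerConfig, from_backbone_and_decoder_configs)
# are assumptions about how this class is exposed, so the call is left commented out.
#   from transformers import DetrConfig, MaskFormerConfig, SwinConfig
#   default_cfg = MaskFormerConfig()  # falls back to a Swin backbone and a DETR decoder
#   custom_cfg = MaskFormerConfig.from_backbone_and_decoder_configs(
#       backbone_config=SwinConfig(), decoder_config=DetrConfig()
#   )
#   print(custom_cfg.backbone_config.model_type, custom_cfg.decoder_config.model_type)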
| 302
| 0
|
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
lowerCAmelCase_ = TypeVar('T')
class __A ( Generic[T] ):
'''simple docstring'''
lowerCAmelCase : deque[T] # Cache store of keys
lowerCAmelCase : set[T] # References of the keys in cache
lowerCAmelCase : int = 1_0 # Maximum capacity of cache
def __init__( self : Optional[Any] ,_snake_case : int ) -> None:
"""simple docstring"""
lowercase__ : int = deque()
lowercase__ : Any = set()
if not n:
lowercase__ : Union[str, Any] = sys.maxsize
elif n < 0:
raise ValueError('''n should be an integer greater than 0.''' )
else:
lowercase__ : int = n
def UpperCAmelCase ( self : str ,_snake_case : T ) -> None:
"""simple docstring"""
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
lowercase__ : Optional[Any] = self.dq_store.pop()
self.key_reference.remove(_snake_case )
else:
self.dq_store.remove(_snake_case )
self.dq_store.appendleft(_snake_case )
self.key_reference.add(_snake_case )
def UpperCAmelCase ( self : Union[str, Any] ) -> None:
"""simple docstring"""
for k in self.dq_store:
print(_snake_case )
def __repr__( self : List[str] ) -> str:
"""simple docstring"""
return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase_ = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 369
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[int]:
lowercase__ : int = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
lowercase__ : Dict = True if '''large''' in model_name or '''huge''' in model_name else False
lowercase__ : Optional[int] = True if '''large''' in model_name or '''huge''' in model_name else False
lowercase__ : List[Any] = True if '''large''' in model_name or '''huge''' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
lowercase__ : Dict = [3, 3, 3, 3]
lowercase__ : str = [5, 5, 5, 5]
elif "fl4" in model_name:
lowercase__ : List[str] = [4, 4, 4, 4]
lowercase__ : Any = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
lowercase__ : List[str] = [3, 3, 3, 3]
if "lrf" in model_name:
lowercase__ : List[str] = [3, 3, 3, 3]
else:
lowercase__ : Optional[Any] = [2, 2, 2, 2]
if "tiny" in model_name:
lowercase__ : Optional[int] = 96
elif "small" in model_name:
lowercase__ : Union[str, Any] = 96
elif "base" in model_name:
lowercase__ : Tuple = 1_28
elif "large" in model_name:
lowercase__ : Any = 1_92
elif "xlarge" in model_name:
lowercase__ : Any = 2_56
elif "huge" in model_name:
lowercase__ : Union[str, Any] = 3_52
# set label information
lowercase__ : List[Any] = '''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
lowercase__ : Optional[int] = '''imagenet-22k-id2label.json'''
else:
lowercase__ : Optional[Any] = '''imagenet-1k-id2label.json'''
lowercase__ : Dict = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) , '''r''' ) )
lowercase__ : Union[str, Any] = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
lowercase__ : Optional[Any] = {v: k for k, v in idalabel.items()}
lowercase__ : int = FocalNetConfig(
embed_dim=__lowerCamelCase , depths=__lowerCamelCase , focal_levels=__lowerCamelCase , focal_windows=__lowerCamelCase , use_conv_embed=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid=__lowerCamelCase , use_post_layernorm=__lowerCamelCase , use_layerscale=__lowerCamelCase , )
return config
def __UpperCAmelCase ( __lowerCamelCase ) -> Any:
if "patch_embed.proj" in name:
lowercase__ : Any = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
lowercase__ : Tuple = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if "layers" in name:
lowercase__ : Dict = '''encoder.''' + name
if "encoder.layers" in name:
lowercase__ : Tuple = name.replace('''encoder.layers''' , '''encoder.stages''' )
if "downsample.proj" in name:
lowercase__ : Union[str, Any] = name.replace('''downsample.proj''' , '''downsample.projection''' )
if "blocks" in name:
lowercase__ : Optional[Any] = name.replace('''blocks''' , '''layers''' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
lowercase__ : Dict = name.replace('''modulation.f''' , '''modulation.projection_in''' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
lowercase__ : Dict = name.replace('''modulation.h''' , '''modulation.projection_context''' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
lowercase__ : Optional[Any] = name.replace('''modulation.proj''' , '''modulation.projection_out''' )
if name == "norm.weight":
lowercase__ : Dict = '''layernorm.weight'''
if name == "norm.bias":
lowercase__ : Dict = '''layernorm.bias'''
if "head" in name:
lowercase__ : Dict = name.replace('''head''' , '''classifier''' )
else:
lowercase__ : List[Any] = '''focalnet.''' + name
return name
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=False ) -> List[str]:
# fmt: off
lowercase__ : Any = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
lowercase__ : Optional[int] = model_name_to_url[model_name]
print('''Checkpoint URL: ''' , __lowerCamelCase )
lowercase__ : str = torch.hub.load_state_dict_from_url(__lowerCamelCase , map_location='''cpu''' )['''model''']
# rename keys
for key in state_dict.copy().keys():
lowercase__ : int = state_dict.pop(__lowerCamelCase )
lowercase__ : Any = val
lowercase__ : List[Any] = get_focalnet_config(__lowerCamelCase )
lowercase__ : Optional[int] = FocalNetForImageClassification(__lowerCamelCase )
model.eval()
# load state dict
model.load_state_dict(__lowerCamelCase )
# verify conversion
lowercase__ : int = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase__ : int = BitImageProcessor(
do_resize=__lowerCamelCase , size={'''shortest_edge''': 2_56} , resample=PILImageResampling.BILINEAR , do_center_crop=__lowerCamelCase , crop_size=2_24 , do_normalize=__lowerCamelCase , image_mean=__lowerCamelCase , image_std=__lowerCamelCase , )
lowercase__ : str = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw )
lowercase__ : List[str] = processor(images=__lowerCamelCase , return_tensors='''pt''' )
lowercase__ : List[str] = transforms.Compose(
[
transforms.Resize(2_56 ),
transforms.CenterCrop(2_24 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
lowercase__ : Optional[Any] = image_transforms(__lowerCamelCase ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , __lowerCamelCase , atol=1E-4 )
lowercase__ : Optional[Any] = model(**__lowerCamelCase )
lowercase__ : Optional[int] = outputs.logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
print('''First values of logits:''' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
lowercase__ : Dict = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] )
elif model_name == "focalnet-tiny-lrf":
lowercase__ : Union[str, Any] = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] )
elif model_name == "focalnet-small":
lowercase__ : Optional[int] = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] )
elif model_name == "focalnet-small-lrf":
lowercase__ : Dict = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] )
elif model_name == "focalnet-base":
lowercase__ : List[str] = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] )
elif model_name == "focalnet-base-lrf":
lowercase__ : List[str] = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] )
assert torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowerCamelCase )
processor.save_pretrained(__lowerCamelCase )
if push_to_hub:
print(f"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(f"""{model_name}""" )
processor.push_to_hub(f"""{model_name}""" )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
lowerCAmelCase_ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
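# Hedged usage note (added for illustration, not part of the original script): the
# converter is driven entirely by the argparse flags defined above, e.g.
#   python convert_focalnet_to_hf_format.py --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny --push_to_hub
# The script filename is an assumption; --model_name must be one of the keys of
# model_name_to_url, and omitting --pytorch_dump_folder_path skips saving to disk.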
| 302
| 0
|
"""simple docstring"""
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
lowerCAmelCase_ = [
'EAGER',
'AOT_EAGER',
'INDUCTOR',
'NVFUSER',
'AOT_NVFUSER',
'AOT_CUDAGRAPHS',
'OFI',
'FX2TRT',
'ONNXRT',
'IPEX',
]
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None ) -> str:
lowercase__ : List[str] = True
while ask_again:
lowercase__ : List[str] = input(__lowerCamelCase )
try:
if default is not None and len(__lowerCamelCase ) == 0:
return default
return convert_value(__lowerCamelCase ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(__lowerCamelCase )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase=[] , __lowerCamelCase=None , __lowerCamelCase=0 ) -> Dict:
lowercase__ : Optional[Any] = BulletMenu(__lowerCamelCase , __lowerCamelCase )
lowercase__ : str = menu.run(default_choice=__lowerCamelCase )
return convert_value(__lowerCamelCase ) if convert_value is not None else result
def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[Any]:
lowercase__ : int = int(__lowerCamelCase )
return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] )
def __UpperCAmelCase ( __lowerCamelCase ) -> str:
lowercase__ : List[str] = int(__lowerCamelCase )
return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] )
def __UpperCAmelCase ( __lowerCamelCase ) -> List[str]:
lowercase__ : Union[str, Any] = int(__lowerCamelCase )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[int]:
lowercase__ : Optional[Any] = int(__lowerCamelCase )
return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] )
def __UpperCAmelCase ( __lowerCamelCase ) -> Any:
lowercase__ : int = int(__lowerCamelCase )
return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] )
def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[Any]:
return {"yes": True, "no": False}[value.lower()]
class __A ( argparse.RawDescriptionHelpFormatter ):
'''simple docstring'''
def UpperCAmelCase ( self : Optional[int] ,_snake_case : Any ,_snake_case : List[str] ,_snake_case : Optional[Any] ,_snake_case : Tuple ) -> Any:
"""simple docstring"""
lowercase__ : Union[str, Any] = super()._format_usage(_snake_case ,_snake_case ,_snake_case ,_snake_case )
lowercase__ : Tuple = usage.replace('''<command> [<args>] ''' ,'''''' )
return usage
| 370
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : List[Any] = ["image_processor", "tokenizer"]
lowerCAmelCase : int = "ChineseCLIPImageProcessor"
lowerCAmelCase : str = ("BertTokenizer", "BertTokenizerFast")
def __init__( self : Tuple ,_snake_case : str=None ,_snake_case : Union[str, Any]=None ,**_snake_case : str ) -> Any:
"""simple docstring"""
lowercase__ : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' ,_snake_case ,)
lowercase__ : Tuple = kwargs.pop('''feature_extractor''' )
lowercase__ : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_snake_case ,_snake_case )
lowercase__ : List[Any] = self.image_processor
def __call__( self : List[Any] ,_snake_case : Optional[int]=None ,_snake_case : Dict=None ,_snake_case : List[Any]=None ,**_snake_case : List[str] ) -> List[Any]:
"""simple docstring"""
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
lowercase__ : str = self.tokenizer(_snake_case ,return_tensors=_snake_case ,**_snake_case )
if images is not None:
lowercase__ : str = self.image_processor(_snake_case ,return_tensors=_snake_case ,**_snake_case )
if text is not None and images is not None:
lowercase__ : Any = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_snake_case ) ,tensor_type=_snake_case )
def UpperCAmelCase ( self : Any ,*_snake_case : List[Any] ,**_snake_case : Optional[int] ) -> Tuple:
"""simple docstring"""
return self.tokenizer.batch_decode(*_snake_case ,**_snake_case )
def UpperCAmelCase ( self : Union[str, Any] ,*_snake_case : Tuple ,**_snake_case : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.decode(*_snake_case ,**_snake_case )
@property
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : List[str] = self.tokenizer.model_input_names
lowercase__ : int = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCAmelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' ,_snake_case ,)
return self.image_processor_class
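# Hedged usage sketch (added for illustration, not part of the original file): a joint
# text + image call returns the tokenizer fields plus pixel_values, as implemented in
# __call__ above. The checkpoint id is an assumption, so the snippet is commented out.
#   from PIL import Image
#   from transformers import ChineseCLIPProcessor
#   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   inputs = processor(text=["一张猫的照片"], images=Image.new("RGB", (224, 224)), return_tensors="pt")  # "a photo of a cat"
#   print(sorted(inputs.keys()))  # e.g. ['attention_mask', 'input_ids', ..., 'pixel_values']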
| 302
| 0
|
"""simple docstring"""
def __UpperCAmelCase ( __lowerCamelCase ) -> bool:
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
lowerCAmelCase_ = int(input('Enter number: ').strip())
print(F'''{number} is {"" if perfect(number) else "not "}a Perfect Number.''')
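# Quick self-contained checks (added for illustration, not part of the original file):
# 6 = 1 + 2 + 3 and 28 = 1 + 2 + 4 + 7 + 14 are perfect, while 12 is not (its proper
# divisors sum to 16).
def _is_perfect_demo(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
assert _is_perfect_demo(6) and _is_perfect_demo(28) and not _is_perfect_demo(12)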
| 371
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ = {
'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['RobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 302
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class snake_case :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=2 , __UpperCAmelCase=32 , __UpperCAmelCase=16 , __UpperCAmelCase=3 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=[0, 1, 2, 3] , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=[1, 3_84, 24, 24] , __UpperCAmelCase=True , __UpperCAmelCase=None , ) ->List[str]:
a_ = parent
a_ = batch_size
a_ = image_size
a_ = patch_size
a_ = num_channels
a_ = is_training
a_ = use_labels
a_ = hidden_size
a_ = num_hidden_layers
a_ = backbone_out_indices
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = initializer_range
a_ = num_labels
a_ = backbone_featmap_shape
a_ = scope
a_ = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
a_ = (image_size // patch_size) ** 2
a_ = num_patches + 1
def UpperCAmelCase__ ( self) ->Union[str, Any]:
a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels)
a_ = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [96, 1_92, 3_84, 7_68],
"num_groups": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__UpperCAmelCase , backbone_featmap_shape=self.backbone_featmap_shape , )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Optional[int]:
a_ = DPTModel(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Any:
a_ = self.num_labels
a_ = DPTForDepthEstimation(__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase)
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->str:
a_ = self.num_labels
a_ = DPTForSemanticSegmentation(__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , labels=__UpperCAmelCase)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size))
def UpperCAmelCase__ ( self) ->Optional[int]:
a_ = self.prepare_config_and_inputs()
a_ , a_ , a_ = config_and_inputs
a_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : str = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
a_ : Optional[int] = (
{
"""depth-estimation""": DPTForDepthEstimation,
"""feature-extraction""": DPTModel,
"""image-segmentation""": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
a_ : str = False
a_ : Union[str, Any] = False
a_ : List[str] = False
def UpperCAmelCase__ ( self) ->Optional[int]:
a_ = DPTModelTester(self)
a_ = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37)
def UpperCAmelCase__ ( self) ->Any:
self.config_tester.run_common_tests()
@unittest.skip(reason="DPT does not use inputs_embeds")
def UpperCAmelCase__ ( self) ->Optional[Any]:
pass
def UpperCAmelCase__ ( self) ->List[str]:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(__UpperCAmelCase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
a_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear))
def UpperCAmelCase__ ( self) ->str:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(__UpperCAmelCase)
a_ = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ = [*signature.parameters.keys()]
a_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Optional[Any]:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Tuple:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Union[str, Any]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = True
if model_class in get_values(__UpperCAmelCase):
continue
a_ = model_class(__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.train()
a_ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase)
a_ = model(**__UpperCAmelCase).loss
loss.backward()
def UpperCAmelCase__ ( self) ->Optional[Any]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = False
a_ = True
if model_class in get_values(__UpperCAmelCase) or not model_class.supports_gradient_checkpointing:
continue
a_ = model_class(__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.gradient_checkpointing_enable()
model.train()
a_ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase)
a_ = model(**__UpperCAmelCase).loss
loss.backward()
def UpperCAmelCase__ ( self) ->Union[str, Any]:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = _config_zero_init(__UpperCAmelCase)
for model_class in self.all_model_classes:
a_ = model_class(config=__UpperCAmelCase)
# Skip the check for the backbone
a_ = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
a_ = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
def UpperCAmelCase__ ( self) ->int:
pass
@slow
def UpperCAmelCase__ ( self) ->Optional[int]:
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
a_ = DPTModel.from_pretrained(__UpperCAmelCase)
self.assertIsNotNone(__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->str:
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = "add"
with self.assertRaises(__UpperCAmelCase):
a_ = DPTForDepthEstimation(__UpperCAmelCase)
def UpperCamelCase ( ) ->Union[str, Any]:
"""simple docstring"""
a_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
@slow
class snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self) ->Any:
a_ = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
a_ = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(__UpperCAmelCase)
a_ = prepare_img()
a_ = image_processor(images=__UpperCAmelCase , return_tensors="pt").to(__UpperCAmelCase)
# forward pass
with torch.no_grad():
a_ = model(**__UpperCAmelCase)
a_ = outputs.predicted_depth
# verify the predicted depth
a_ = torch.Size((1, 3_84, 3_84))
self.assertEqual(predicted_depth.shape , __UpperCAmelCase)
a_ = torch.tensor(
[[[5.6_437, 5.6_146, 5.6_511], [5.4_371, 5.5_649, 5.5_958], [5.5_215, 5.5_184, 5.5_293]]]).to(__UpperCAmelCase)
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_00 , __UpperCAmelCase , atol=1E-4))
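# Hedged usage sketch (added for illustration, not part of the original tests): turning
# `predicted_depth` into a viewable depth map. Interpolating back to the input resolution
# is the usual DPT post-processing and is an assumption about intended usage, not
# something the test above performs, so it is left commented out.
#   prediction = torch.nn.functional.interpolate(
#       predicted_depth.unsqueeze(1), size=image.size[::-1], mode="bicubic", align_corners=False
#   )
#   depth = (prediction.squeeze().cpu().numpy() * 255 / prediction.max().item()).astype("uint8")
#   Image.fromarray(depth).save("depth_map.png")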
| 303
|
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
a_ = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
a_ = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
a_ = F'''{src_lang}-{tgt_lang}'''
a_ = F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
model_card_dir.mkdir(parents=UpperCAmelCase , exist_ok=UpperCAmelCase )
a_ = os.path.join(UpperCAmelCase , "README.md" )
print(F'''Generating {path}''' )
with open(UpperCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(UpperCAmelCase )
# make sure we are under the root of the project
UpperCamelCase_ = Path(__file__).resolve().parent.parent.parent
UpperCamelCase_ = repo_dir / 'model_cards'
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
UpperCamelCase_ = model_cards_dir / 'allenai' / model_name
write_model_card(model_card_dir, src_lang='en', tgt_lang='de', model_name=model_name)
| 303
| 1
|
"""simple docstring"""
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
UpperCamelCase_ = logging.getLogger(__name__)
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None) ->Tuple:
super().__init__(
__UpperCAmelCase , question_encoder_tokenizer=__UpperCAmelCase , generator_tokenizer=__UpperCAmelCase , index=__UpperCAmelCase , init_retrieval=__UpperCAmelCase , )
a_ = None
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Union[str, Any]:
logger.info("initializing retrieval")
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("dist initialized")
# needs to be set manually
a_ = self._infer_socket_ifname()
# avoid clash with the NCCL port
a_ = str(distributed_port + 1)
a_ = dist.new_group(ranks=__UpperCAmelCase , backend="gloo")
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("dist not initialized / main")
self.index.init_index()
# all processes wait untill the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group)
def UpperCAmelCase__ ( self) ->Tuple:
return dist.get_rank(group=self.process_group) == 0
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=torch.floataa) ->Tuple:
a_ = torch.empty(__UpperCAmelCase , dtype=__UpperCAmelCase)
dist.scatter(__UpperCAmelCase , src=0 , scatter_list=__UpperCAmelCase , group=self.process_group)
return target_tensor
def UpperCAmelCase__ ( self) ->Any:
a_ = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
a_ = next((addr for addr in addrs if addr.startswith("e")) , __UpperCAmelCase)
return ifname
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase) ->Tuple[np.ndarray, List[dict]]:
# single GPU training
if not dist.is_initialized():
a_ , a_ = self._main_retrieve(__UpperCAmelCase , __UpperCAmelCase)
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__UpperCAmelCase)
# distributed training
a_ = dist.get_world_size(group=self.process_group)
# gather logic
a_ = None
if self._is_main():
a_ = [torch.empty(question_hidden_states.shape , dtype=torch.floataa) for _ in range(__UpperCAmelCase)]
dist.gather(torch.tensor(__UpperCAmelCase) , dst=0 , gather_list=__UpperCAmelCase , group=self.process_group)
# scatter logic
a_ = question_hidden_states.shape[0]
a_ = []
a_ = []
if self._is_main():
assert len(__UpperCAmelCase) == world_size
a_ , a_ = self._main_retrieve(torch.cat(__UpperCAmelCase).numpy() , __UpperCAmelCase)
a_ , a_ = torch.tensor(__UpperCAmelCase), torch.tensor(__UpperCAmelCase)
a_ = self._chunk_tensor(__UpperCAmelCase , __UpperCAmelCase)
a_ = self._chunk_tensor(__UpperCAmelCase , __UpperCAmelCase)
a_ = self._scattered(__UpperCAmelCase , [n_queries, n_docs] , target_type=torch.intaa)
a_ = self._scattered(__UpperCAmelCase , [n_queries, n_docs, question_hidden_states.shape[1]])
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(__UpperCAmelCase)
| 303
|
"""simple docstring"""
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(UpperCAmelCase , n - 1 , UpperCAmelCase ) * a) % mod
else:
        a_ = binary_exponentiation(UpperCAmelCase , n // 2 , UpperCAmelCase )
return (b * b) % mod
# a prime number
UpperCamelCase_ = 701
UpperCamelCase_ = 1000000000
UpperCamelCase_ = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
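# Quick self-contained check (added for illustration, not part of the original file): the
# recursive routine should agree with Python's built-in three-argument pow, which computes
# (base ** exp) % mod with the same O(log exp) idea.
def _binary_exponentiation_demo(a: int, n: int, mod: int) -> int:
    if n == 0:
        return 1
    if n % 2 == 1:
        return (_binary_exponentiation_demo(a, n - 1, mod) * a) % mod
    half = _binary_exponentiation_demo(a, n // 2, mod)
    return (half * half) % mod
assert _binary_exponentiation_demo(10, 699, 701) == pow(10, 699, 701)
assert (_binary_exponentiation_demo(10, 699, 701) * 10) % 701 == 1  # 10 * 10**(p - 2) is 1 mod p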
| 303
| 1
|
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
UpperCamelCase_ = importlib.util.find_spec('s3fs') is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
UpperCamelCase_ = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def UpperCamelCase ( UpperCAmelCase ) ->str:
"""simple docstring"""
if "://" in dataset_path:
a_ = dataset_path.split("://" )[1]
return dataset_path
def UpperCamelCase ( UpperCAmelCase ) ->bool:
"""simple docstring"""
if fs is not None and fs.protocol != "file":
return True
else:
return False
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
a_ = not is_remote_filesystem(UpperCAmelCase )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(UpperCAmelCase ) , fs._strip_protocol(UpperCAmelCase ) )
else:
fs.mv(UpperCAmelCase , UpperCAmelCase , recursive=UpperCAmelCase )
def UpperCamelCase ( ) ->None:
"""simple docstring"""
if hasattr(fsspec.asyn , "reset_lock" ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
a_ = None
a_ = None
a_ = threading.Lock()
| 303
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase) ->None:
warnings.warn(
"The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use YolosImageProcessor instead." , __UpperCAmelCase , )
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase)
| 303
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : str = """gpt_neox_japanese"""
def __init__( self , __UpperCAmelCase=3_20_00 , __UpperCAmelCase=25_60 , __UpperCAmelCase=32 , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase="gelu" , __UpperCAmelCase=1.00 , __UpperCAmelCase=1_00_00 , __UpperCAmelCase=20_48 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-5 , __UpperCAmelCase=True , __UpperCAmelCase=3_19_96 , __UpperCAmelCase=3_19_99 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , **__UpperCAmelCase , ) ->Tuple:
super().__init__(bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase)
a_ = vocab_size
a_ = max_position_embeddings
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_multiple_size
a_ = hidden_act
a_ = rotary_pct
a_ = rotary_emb_base
a_ = initializer_range
a_ = layer_norm_eps
a_ = use_cache
a_ = attention_dropout
a_ = hidden_dropout
| 303
|
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self) ->Dict:
a_ = inspect.getfile(accelerate.test_utils)
a_ = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
a_ = os.path.sep.join(
mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"])
a_ = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])
@require_multi_gpu
def UpperCAmelCase__ ( self) ->Any:
print(F'''Found {torch.cuda.device_count()} devices.''')
a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy())
@require_multi_gpu
def UpperCAmelCase__ ( self) ->str:
print(F'''Found {torch.cuda.device_count()} devices.''')
a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
print(F'''Command: {cmd}''')
with patch_environment(omp_num_threads=1):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy())
@require_multi_gpu
def UpperCAmelCase__ ( self) ->Optional[int]:
a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__)]
with patch_environment(omp_num_threads=1):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy())
@require_multi_gpu
def UpperCAmelCase__ ( self) ->List[Any]:
print(F'''Found {torch.cuda.device_count()} devices, using 2 devices only''')
a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1"):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy())
if __name__ == "__main__":
UpperCamelCase_ = Accelerator()
UpperCamelCase_ = (accelerator.state.process_index + 2, 10)
UpperCamelCase_ = torch.randint(0, 10, shape).to(accelerator.device)
UpperCamelCase_ = ''
UpperCamelCase_ = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
UpperCamelCase_ = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
UpperCamelCase_ = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 303
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'
),
'distilbert-base-uncased-finetuned-sst-2-english': (
'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'
),
}
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : Any = """distilbert"""
a_ : int = {
"""hidden_size""": """dim""",
"""num_attention_heads""": """n_heads""",
"""num_hidden_layers""": """n_layers""",
}
def __init__( self , __UpperCAmelCase=3_05_22 , __UpperCAmelCase=5_12 , __UpperCAmelCase=False , __UpperCAmelCase=6 , __UpperCAmelCase=12 , __UpperCAmelCase=7_68 , __UpperCAmelCase=4 * 7_68 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.02 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.2 , __UpperCAmelCase=0 , **__UpperCAmelCase , ) ->Tuple:
a_ = vocab_size
a_ = max_position_embeddings
a_ = sinusoidal_pos_embds
a_ = n_layers
a_ = n_heads
a_ = dim
a_ = hidden_dim
a_ = dropout
a_ = attention_dropout
a_ = activation
a_ = initializer_range
a_ = qa_dropout
a_ = seq_classif_dropout
super().__init__(**__UpperCAmelCase , pad_token_id=__UpperCAmelCase)
class snake_case ( SCREAMING_SNAKE_CASE_ ):
@property
def UpperCAmelCase__ ( self) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
a_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
a_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
])
| 303
|
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) ->tuple[float | int, list[tuple[int, int]]]:
"""simple docstring"""
a_ , a_ = grid.shape
a_ = [-1, 1, 0, 0]
a_ = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
a_ , a_ = [(0, source)], set()
a_ = np.full((rows, cols) , np.inf )
a_ = 0
a_ = np.empty((rows, cols) , dtype=UpperCAmelCase )
a_ = None
while queue:
((a_) , (a_)) = heappop(UpperCAmelCase )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
a_ = []
while (x, y) != source:
path.append((x, y) )
a_ , a_ = predecessors[x, y]
path.append(UpperCAmelCase ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(UpperCAmelCase ) ):
a_ , a_ = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
a_ = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(UpperCAmelCase , (dist + 1, (nx, ny)) )
a_ = dist + 1
a_ = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
| 303
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {
'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
'LiltForQuestionAnswering',
'LiltForSequenceClassification',
'LiltForTokenClassification',
'LiltModel',
'LiltPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 303
|
"""simple docstring"""
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
UpperCamelCase_ = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
UpperCamelCase_ = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class snake_case :
def __init__( self) ->Optional[int]:
a_ = WATERMARK_BITS
a_ = WatermarkEncoder()
self.encoder.set_watermark("bits" , self.watermark)
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Optional[int]:
# can't encode images that are smaller than 256
if images.shape[-1] < 2_56:
return images
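# scale the [-1, 1] float batch to [0, 255] channel-last numpy arrays before handing each frame to the DWT-DCT encoder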
a_ = (2_55 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1).float().numpy()
a_ = [self.encoder.encode(__UpperCAmelCase , "dwtDct") for image in images]
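# stack the watermarked frames back into a channel-first tensor and rescale to [-1, 1]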
a_ = torch.from_numpy(np.array(__UpperCAmelCase)).permute(0 , 3 , 1 , 2)
a_ = torch.clamp(2 * (images / 2_55 - 0.5) , min=-1.0 , max=1.0)
return images
| 303
| 1
|
"""simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ) ->int:
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_mask
a_ = use_token_type_ids
a_ = use_labels
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = type_sequence_label_size
a_ = initializer_range
a_ = num_labels
a_ = num_choices
a_ = scope
def UpperCAmelCase__ ( self) ->Dict:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = None
if self.use_input_mask:
a_ = random_attention_mask([self.batch_size, self.seq_length])
a_ = None
if self.use_token_type_ids:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
a_ = None
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a_ = ids_tensor([self.batch_size] , self.num_choices)
a_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self) ->Any:
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->List[Any]:
a_ = BioGptModel(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase)
a_ = model(__UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->str:
a_ = BioGptForCausalLM(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase) ->List[Any]:
a_ = BioGptModel(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
# create attention mask
a_ = torch.ones(input_ids.shape , dtype=torch.long , device=__UpperCAmelCase)
a_ = self.seq_length // 2
a_ = 0
# first forward pass
a_ , a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase).to_tuple()
# create a hypothetical next token and extend next_input_ids with it
a_ = ids_tensor((self.batch_size, 1) , config.vocab_size)
# change a random masked slice from input_ids
a_ = ids_tensor((1,) , __UpperCAmelCase).item() + 1
a_ = ids_tensor((self.batch_size, 1) , config.vocab_size).squeeze(-1)
a_ = random_other_next_tokens
# append to next input_ids and attn_mask
a_ = torch.cat([input_ids, next_tokens] , dim=-1)
a_ = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=__UpperCAmelCase)] , dim=1 , )
# get two different outputs
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase)["last_hidden_state"]
a_ = model(__UpperCAmelCase , past_key_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase)["last_hidden_state"]
# select random slice
a_ = ids_tensor((1,) , output_from_past.shape[-1]).item()
a_ = output_from_no_past[:, -1, random_slice_idx].detach()
a_ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase) ->str:
a_ = BioGptModel(config=__UpperCAmelCase).to(__UpperCAmelCase).eval()
a_ = torch.ones(input_ids.shape , dtype=torch.long , device=__UpperCAmelCase)
# first forward pass
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase)
a_ , a_ = outputs.to_tuple()
# create hypothetical multiple next tokens and extend next_input_ids with them
a_ = ids_tensor((self.batch_size, 3) , config.vocab_size)
a_ = ids_tensor((self.batch_size, 3) , 2)
# append to next input_ids and next attention mask
a_ = torch.cat([input_ids, next_tokens] , dim=-1)
a_ = torch.cat([attention_mask, next_attn_mask] , dim=-1)
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase)["last_hidden_state"]
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase)[
"last_hidden_state"
]
# select random slice
a_ = ids_tensor((1,) , output_from_past.shape[-1]).item()
a_ = output_from_no_past[:, -3:, random_slice_idx].detach()
a_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , __UpperCAmelCase=False) ->Union[str, Any]:
a_ = BioGptForCausalLM(__UpperCAmelCase)
model.to(__UpperCAmelCase)
if gradient_checkpointing:
model.gradient_checkpointing_enable()
a_ = model(__UpperCAmelCase , labels=__UpperCAmelCase)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
result.loss.backward()
def UpperCAmelCase__ ( self , __UpperCAmelCase , *__UpperCAmelCase) ->Tuple:
a_ = BioGptModel(__UpperCAmelCase)
a_ = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
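# GPT-2-style scaled initialization: output projection weights are expected to have std = initializer_range / sqrt(2 * num_hidden_layers)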
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std) , 0.001)
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0) , 0.01)
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase) ->Dict:
a_ = self.num_labels
a_ = BioGptForTokenClassification(__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCAmelCase__ ( self) ->List[str]:
a_ = self.prepare_config_and_inputs()
(a_, a_, a_, a_, a_, a_, a_) = config_and_inputs
a_ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : Optional[int] = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
a_ : Any = (BioGptForCausalLM,) if is_torch_available() else ()
a_ : Optional[Any] = (
{
"""feature-extraction""": BioGptModel,
"""text-classification""": BioGptForSequenceClassification,
"""text-generation""": BioGptForCausalLM,
"""token-classification""": BioGptForTokenClassification,
"""zero-shot""": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ : Dict = False
def UpperCAmelCase__ ( self) ->Any:
a_ = BioGptModelTester(self)
a_ = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37)
def UpperCAmelCase__ ( self) ->List[str]:
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self) ->Tuple:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Any:
a_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a_ = type
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Dict:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Tuple:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*__UpperCAmelCase , gradient_checkpointing=__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Dict:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*__UpperCAmelCase)
@slow
def UpperCAmelCase__ ( self) ->Any:
a_ = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
model.to(__UpperCAmelCase)
a_ = BioGptTokenizer.from_pretrained("microsoft/biogpt")
a_ = "left"
# Use the EOS token as the PAD token for batched generation
a_ = tokenizer.eos_token
a_ = model.config.eos_token_id
# use different length sentences to test batching
a_ = [
"Hello, my dog is a little",
"Today, I",
]
a_ = tokenizer(__UpperCAmelCase , return_tensors="pt" , padding=__UpperCAmelCase)
a_ = inputs["input_ids"].to(__UpperCAmelCase)
a_ = model.generate(
input_ids=__UpperCAmelCase , attention_mask=inputs["attention_mask"].to(__UpperCAmelCase) , )
a_ = tokenizer(sentences[0] , return_tensors="pt").input_ids.to(__UpperCAmelCase)
a_ = model.generate(input_ids=__UpperCAmelCase)
a_ = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
a_ = tokenizer(sentences[1] , return_tensors="pt").input_ids.to(__UpperCAmelCase)
a_ = model.generate(input_ids=__UpperCAmelCase , max_length=model.config.max_length - num_paddings)
a_ = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase)
a_ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__UpperCAmelCase)
a_ = tokenizer.decode(output_padded[0] , skip_special_tokens=__UpperCAmelCase)
a_ = [
"Hello, my dog is a little bit bigger than a little bit.",
"Today, I have a good idea of how to use the information",
]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
self.assertListEqual(__UpperCAmelCase , [non_padded_sentence, padded_sentence])
@slow
def UpperCAmelCase__ ( self) ->Optional[int]:
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = BioGptModel.from_pretrained(__UpperCAmelCase)
self.assertIsNotNone(__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[str]:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = 3
a_ = input_dict["input_ids"]
a_ = input_ids.ne(1).to(__UpperCAmelCase)
a_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
a_ = BioGptForSequenceClassification(__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def UpperCAmelCase__ ( self) ->Any:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = 3
a_ = "multi_label_classification"
a_ = input_dict["input_ids"]
a_ = input_ids.ne(1).to(__UpperCAmelCase)
a_ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
a_ = BioGptForSequenceClassification(__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class snake_case ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self) ->Union[str, Any]:
a_ = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
a_ = torch.tensor([[2, 48_05, 9, 6_56, 21]])
a_ = model(__UpperCAmelCase)[0]
a_ = 4_23_84
a_ = torch.Size((1, 5, vocab_size))
self.assertEqual(output.shape , __UpperCAmelCase)
a_ = torch.tensor(
[[[-9.5_236, -9.8_918, 10.4_557], [-11.0_469, -9.6_423, 8.1_022], [-8.8_664, -7.8_826, 5.5_325]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4))
@slow
def UpperCAmelCase__ ( self) ->int:
a_ = BioGptTokenizer.from_pretrained("microsoft/biogpt")
a_ = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
model.to(__UpperCAmelCase)
torch.manual_seed(0)
a_ = tokenizer("COVID-19 is" , return_tensors="pt").to(__UpperCAmelCase)
a_ = model.generate(
**__UpperCAmelCase , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=__UpperCAmelCase , )
a_ = tokenizer.decode(output_ids[0] , skip_special_tokens=__UpperCAmelCase)
a_ = (
"COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
" causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
" territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
" and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
" more than 800,000 deaths."
)
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase)
| 303
|
"""simple docstring"""
import math
UpperCamelCase_ = 10
UpperCamelCase_ = 7
UpperCamelCase_ = BALLS_PER_COLOUR * NUM_COLOURS
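# Expected number of distinct colours among the drawn balls: NUM_COLOURS * (1 - C(NUM_BALLS - BALLS_PER_COLOUR, n) / C(NUM_BALLS, n)), with 7 colours of 10 balls each (70 in total)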
def UpperCamelCase ( UpperCAmelCase = 20 ) ->str:
"""simple docstring"""
a_ = math.comb(UpperCAmelCase , UpperCAmelCase )
a_ = math.comb(NUM_BALLS - BALLS_PER_COLOUR , UpperCAmelCase )
a_ = NUM_COLOURS * (1 - missing_colour / total)
return F'''{result:.9f}'''
if __name__ == "__main__":
print(solution(20))
| 303
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase_ = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ['PoolFormerFeatureExtractor']
UpperCamelCase_ = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 303
|
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
UpperCamelCase_ = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def UpperCamelCase ( UpperCAmelCase ) ->Optional[Any]:
"""simple docstring"""
for pegasus_name, hf_name in PATTERNS:
a_ = k.replace(UpperCAmelCase , UpperCAmelCase )
return k
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->PegasusForConditionalGeneration:
"""simple docstring"""
a_ = DEFAULTS.copy()
cfg_kwargs.update(UpperCAmelCase )
a_ = PegasusConfig(**UpperCAmelCase )
a_ = PegasusForConditionalGeneration(UpperCAmelCase )
a_ = torch_model.model.state_dict()
a_ = {}
for k, v in tf_weights.items():
a_ = rename_state_dict_key(UpperCAmelCase )
if new_k not in sd:
raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''' )
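# dense / projection kernels are stored transposed in the TF checkpoint relative to PyTorch Linear weights, hence the transpose below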
if "dense" in k or "proj" in new_k:
a_ = v.T
a_ = torch.tensor(UpperCAmelCase , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, F'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}'''
# make sure embedding.padding_idx is respected
a_ = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1] )
a_ = mapping["shared.weight"]
a_ = mapping["shared.weight"]
a_ = {k: torch.zeros_like(UpperCAmelCase ) for k, v in sd.items() if k.endswith("bias" ) and k not in mapping}
mapping.update(**UpperCAmelCase )
a_ , a_ = torch_model.model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase )
a_ = [
k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
]
assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], F'''no matches found for the following tf keys {extra}'''
return torch_model
def UpperCamelCase ( UpperCAmelCase="./ckpt/aeslc/model.ckpt-32000" ) ->Dict:
"""simple docstring"""
a_ = tf.train.list_variables(UpperCAmelCase )
a_ = {}
a_ = ["Adafactor", "global_step"]
for name, shape in tqdm(UpperCAmelCase , desc="converting tf checkpoint to dict" ):
a_ = any(pat in name for pat in ignore_name )
if skip_key:
continue
a_ = tf.train.load_variable(UpperCAmelCase , UpperCAmelCase )
a_ = array
return tf_weights
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
a_ = Path(UpperCAmelCase ).parent.name
a_ = task_specific_params[F'''summarization_{dataset}''']["max_position_embeddings"]
a_ = PegasusTokenizer.from_pretrained("sshleifer/pegasus" , model_max_length=UpperCAmelCase )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(UpperCAmelCase )
# convert model
a_ = get_tf_weights_as_numpy(UpperCAmelCase )
a_ = task_specific_params[F'''summarization_{dataset}''']
if dataset == "large":
a_ = task_specific_params
a_ = convert_pegasus(UpperCAmelCase , UpperCAmelCase )
torch_model.save_pretrained(UpperCAmelCase )
a_ = torch_model.state_dict()
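# the position embeddings are dropped from the saved state dict, presumably because Pegasus uses static sinusoidal embeddings that are rebuilt at load time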
sd.pop("model.decoder.embed_positions.weight" )
sd.pop("model.encoder.embed_positions.weight" )
torch.save(UpperCAmelCase , Path(UpperCAmelCase ) / "pytorch_model.bin" )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.')
UpperCamelCase_ = parser.parse_args()
if args.save_dir is None:
UpperCamelCase_ = Path(args.tf_ckpt_path).parent.name
UpperCamelCase_ = os.path.join('pegasus', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 303
| 1
|
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
UpperCamelCase_ = logging.get_logger(__name__)
# General docstring
UpperCamelCase_ = 'ResNetConfig'
# Base docstring
UpperCamelCase_ = 'microsoft/resnet-50'
UpperCamelCase_ = [1, 2048, 7, 7]
# Image classification docstring
UpperCamelCase_ = 'microsoft/resnet-50'
UpperCamelCase_ = 'tiger cat'
UpperCamelCase_ = [
'microsoft/resnet-50',
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class snake_case ( nn.Module ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 3 , __UpperCAmelCase = 1 , __UpperCAmelCase = "relu") ->Union[str, Any]:
super().__init__()
a_ = nn.Convad(
__UpperCAmelCase , __UpperCAmelCase , kernel_size=__UpperCAmelCase , stride=__UpperCAmelCase , padding=kernel_size // 2 , bias=__UpperCAmelCase)
a_ = nn.BatchNormad(__UpperCAmelCase)
a_ = ACTaFN[activation] if activation is not None else nn.Identity()
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Tensor:
a_ = self.convolution(__UpperCAmelCase)
a_ = self.normalization(__UpperCAmelCase)
a_ = self.activation(__UpperCAmelCase)
return hidden_state
class snake_case ( nn.Module ):
def __init__( self , __UpperCAmelCase) ->Any:
super().__init__()
a_ = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act)
a_ = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1)
a_ = config.num_channels
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Tensor:
a_ = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration.")
a_ = self.embedder(__UpperCAmelCase)
a_ = self.pooler(__UpperCAmelCase)
return embedding
class snake_case ( nn.Module ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 2) ->List[str]:
super().__init__()
a_ = nn.Convad(__UpperCAmelCase , __UpperCAmelCase , kernel_size=1 , stride=__UpperCAmelCase , bias=__UpperCAmelCase)
a_ = nn.BatchNormad(__UpperCAmelCase)
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Tensor:
a_ = self.convolution(__UpperCAmelCase)
a_ = self.normalization(__UpperCAmelCase)
return hidden_state
class snake_case ( nn.Module ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 1 , __UpperCAmelCase = "relu") ->Union[str, Any]:
super().__init__()
a_ = in_channels != out_channels or stride != 1
a_ = (
ResNetShortCut(__UpperCAmelCase , __UpperCAmelCase , stride=__UpperCAmelCase) if should_apply_shortcut else nn.Identity()
)
a_ = nn.Sequential(
ResNetConvLayer(__UpperCAmelCase , __UpperCAmelCase , stride=__UpperCAmelCase) , ResNetConvLayer(__UpperCAmelCase , __UpperCAmelCase , activation=__UpperCAmelCase) , )
a_ = ACTaFN[activation]
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Optional[Any]:
a_ = hidden_state
a_ = self.layer(__UpperCAmelCase)
a_ = self.shortcut(__UpperCAmelCase)
hidden_state += residual
a_ = self.activation(__UpperCAmelCase)
return hidden_state
class snake_case ( nn.Module ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 1 , __UpperCAmelCase = "relu" , __UpperCAmelCase = 4) ->Dict:
super().__init__()
a_ = in_channels != out_channels or stride != 1
a_ = out_channels // reduction
a_ = (
ResNetShortCut(__UpperCAmelCase , __UpperCAmelCase , stride=__UpperCAmelCase) if should_apply_shortcut else nn.Identity()
)
a_ = nn.Sequential(
ResNetConvLayer(__UpperCAmelCase , __UpperCAmelCase , kernel_size=1) , ResNetConvLayer(__UpperCAmelCase , __UpperCAmelCase , stride=__UpperCAmelCase) , ResNetConvLayer(__UpperCAmelCase , __UpperCAmelCase , kernel_size=1 , activation=__UpperCAmelCase) , )
a_ = ACTaFN[activation]
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->str:
a_ = hidden_state
a_ = self.layer(__UpperCAmelCase)
a_ = self.shortcut(__UpperCAmelCase)
hidden_state += residual
a_ = self.activation(__UpperCAmelCase)
return hidden_state
class snake_case ( nn.Module ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 2 , __UpperCAmelCase = 2 , ) ->str:
super().__init__()
a_ = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
a_ = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(__UpperCAmelCase , __UpperCAmelCase , stride=__UpperCAmelCase , activation=config.hidden_act) , *[layer(__UpperCAmelCase , __UpperCAmelCase , activation=config.hidden_act) for _ in range(depth - 1)] , )
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Tensor:
a_ = input
for layer in self.layers:
a_ = layer(__UpperCAmelCase)
return hidden_state
class snake_case ( nn.Module ):
def __init__( self , __UpperCAmelCase) ->List[Any]:
super().__init__()
a_ = nn.ModuleList([])
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
__UpperCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ))
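# the remaining stages pair consecutive hidden sizes with the per-stage depths from the config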
a_ = zip(config.hidden_sizes , config.hidden_sizes[1:])
for (in_channels, out_channels), depth in zip(__UpperCAmelCase , config.depths[1:]):
self.stages.append(ResNetStage(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , depth=__UpperCAmelCase))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = True) ->BaseModelOutputWithNoAttention:
a_ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
a_ = hidden_states + (hidden_state,)
a_ = stage_module(__UpperCAmelCase)
if output_hidden_states:
a_ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None)
return BaseModelOutputWithNoAttention(
last_hidden_state=__UpperCAmelCase , hidden_states=__UpperCAmelCase , )
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : List[Any] = ResNetConfig
a_ : List[Any] = """resnet"""
a_ : Tuple = """pixel_values"""
a_ : Any = True
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Optional[int]:
if isinstance(__UpperCAmelCase , nn.Convad):
nn.init.kaiming_normal_(module.weight , mode="fan_out" , nonlinearity="relu")
elif isinstance(__UpperCAmelCase , (nn.BatchNormad, nn.GroupNorm)):
nn.init.constant_(module.weight , 1)
nn.init.constant_(module.bias , 0)
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase=False) ->List[Any]:
if isinstance(__UpperCAmelCase , __UpperCAmelCase):
a_ = value
UpperCamelCase_ = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
UpperCamelCase_ = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"""The bare ResNet model outputting raw features without any specific head on top.""" , SCREAMING_SNAKE_CASE_ , )
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , __UpperCAmelCase) ->Tuple:
super().__init__(__UpperCAmelCase)
a_ = config
a_ = ResNetEmbeddings(__UpperCAmelCase)
a_ = ResNetEncoder(__UpperCAmelCase)
a_ = nn.AdaptiveAvgPoolad((1, 1))
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__UpperCAmelCase)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None) ->BaseModelOutputWithPoolingAndNoAttention:
a_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a_ = return_dict if return_dict is not None else self.config.use_return_dict
a_ = self.embedder(__UpperCAmelCase)
a_ = self.encoder(
__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , return_dict=__UpperCAmelCase)
a_ = encoder_outputs[0]
a_ = self.pooler(__UpperCAmelCase)
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__UpperCAmelCase , pooler_output=__UpperCAmelCase , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"""
ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , SCREAMING_SNAKE_CASE_ , )
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , __UpperCAmelCase) ->Union[str, Any]:
super().__init__(__UpperCAmelCase)
a_ = config.num_labels
a_ = ResNetModel(__UpperCAmelCase)
# classification head
a_ = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__UpperCAmelCase)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCAmelCase__ ( self , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , ) ->ImageClassifierOutputWithNoAttention:
a_ = return_dict if return_dict is not None else self.config.use_return_dict
a_ = self.resnet(__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , return_dict=__UpperCAmelCase)
a_ = outputs.pooler_output if return_dict else outputs[1]
a_ = self.classifier(__UpperCAmelCase)
a_ = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
a_ = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
a_ = "single_label_classification"
else:
a_ = "multi_label_classification"
if self.config.problem_type == "regression":
a_ = MSELoss()
if self.num_labels == 1:
a_ = loss_fct(logits.squeeze() , labels.squeeze())
else:
a_ = loss_fct(__UpperCAmelCase , __UpperCAmelCase)
elif self.config.problem_type == "single_label_classification":
a_ = CrossEntropyLoss()
a_ = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
a_ = BCEWithLogitsLoss()
a_ = loss_fct(__UpperCAmelCase , __UpperCAmelCase)
if not return_dict:
a_ = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__UpperCAmelCase , logits=__UpperCAmelCase , hidden_states=outputs.hidden_states)
@add_start_docstrings(
"""
ResNet backbone, to be used with frameworks like DETR and MaskFormer.
""" , SCREAMING_SNAKE_CASE_ , )
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
def __init__( self , __UpperCAmelCase) ->Dict:
super().__init__(__UpperCAmelCase)
super()._init_backbone(__UpperCAmelCase)
a_ = [config.embedding_size] + config.hidden_sizes
a_ = ResNetEmbeddings(__UpperCAmelCase)
a_ = ResNetEncoder(__UpperCAmelCase)
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__UpperCAmelCase)
@replace_return_docstrings(output_type=__UpperCAmelCase , config_class=_CONFIG_FOR_DOC)
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None) ->BackboneOutput:
a_ = return_dict if return_dict is not None else self.config.use_return_dict
a_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a_ = self.embedder(__UpperCAmelCase)
a_ = self.encoder(__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , return_dict=__UpperCAmelCase)
a_ = outputs.hidden_states
a_ = ()
for idx, stage in enumerate(self.stage_names):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
a_ = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=__UpperCAmelCase , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=__UpperCAmelCase , )
| 303
|
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class snake_case :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=50 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=None , ) ->Dict:
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_mask
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = initializer_range
a_ = use_labels
a_ = scope
def UpperCAmelCase__ ( self) ->Any:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = None
if self.use_input_mask:
a_ = random_attention_mask([self.batch_size, self.seq_length])
if self.use_labels:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = self.get_config()
return config, input_ids, input_mask, token_labels
def UpperCAmelCase__ ( self) ->Optional[Any]:
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self) ->List[str]:
(a_, a_, a_, a_) = self.prepare_config_and_inputs()
a_ = True
a_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
a_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->str:
a_ = BertGenerationEncoder(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase)
a_ = model(__UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->Union[str, Any]:
a_ = True
a_ = BertGenerationEncoder(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , )
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->List[str]:
a_ = True
a_ = True
a_ = BertGenerationDecoder(config=__UpperCAmelCase).to(__UpperCAmelCase).eval()
# first forward pass
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase , )
a_ = outputs.past_key_values
# create hypothetical multiple next tokens and extend next_input_ids with them
a_ = ids_tensor((self.batch_size, 3) , config.vocab_size)
a_ = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append to next input_ids and next attention mask
a_ = torch.cat([input_ids, next_tokens] , dim=-1)
a_ = torch.cat([input_mask, next_mask] , dim=-1)
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["hidden_states"][0]
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["hidden_states"][0]
# select random slice
a_ = ids_tensor((1,) , output_from_past.shape[-1]).item()
a_ = output_from_no_past[:, -3:, random_slice_idx].detach()
a_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , ) ->Tuple:
a_ = BertGenerationDecoder(__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase__ ( self) ->str:
a_ , a_ , a_ , a_ = self.prepare_config_and_inputs()
a_ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : List[str] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
a_ : Optional[int] = (BertGenerationDecoder,) if is_torch_available() else ()
a_ : List[Any] = (
{"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder}
if is_torch_available()
else {}
)
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = BertGenerationEncoderTester(self)
a_ = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37)
def UpperCAmelCase__ ( self) ->Optional[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self) ->Tuple:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Tuple:
a_ , a_ , a_ , a_ = self.model_tester.prepare_config_and_inputs()
a_ = "bert"
self.model_tester.create_and_check_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->int:
a_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[str]:
a_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Optional[int]:
# This regression test was failing with PyTorch < 1.3
(a_, a_, a_, a_, a_, a_) = self.model_tester.prepare_config_and_inputs_for_decoder()
a_ = None
self.model_tester.create_and_check_model_as_decoder(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , )
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*__UpperCAmelCase)
@slow
def UpperCAmelCase__ ( self) ->str:
a_ = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
self.assertIsNotNone(__UpperCAmelCase)
@require_torch
class snake_case ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self) ->int:
a_ = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
a_ = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]])
with torch.no_grad():
a_ = model(__UpperCAmelCase)[0]
a_ = torch.Size([1, 8, 10_24])
self.assertEqual(output.shape , __UpperCAmelCase)
a_ = torch.tensor(
[[[0.1_775, 0.0_083, -0.0_321], [1.6_002, 0.1_287, 0.3_912], [2.1_473, 0.5_791, 0.6_066]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4))
@require_torch
class snake_case ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self) ->List[str]:
a_ = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
a_ = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]])
with torch.no_grad():
a_ = model(__UpperCAmelCase)[0]
a_ = torch.Size([1, 8, 5_03_58])
self.assertEqual(output.shape , __UpperCAmelCase)
a_ = torch.tensor(
[[[-0.5_788, -2.5_994, -3.7_054], [0.0_438, 4.7_997, 1.8_795], [1.5_862, 6.6_409, 4.4_638]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4))
| 303
| 1
|
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , __UpperCAmelCase = "▁" , __UpperCAmelCase = True , __UpperCAmelCase = "<unk>" , __UpperCAmelCase = "</s>" , __UpperCAmelCase = "<pad>" , ) ->str:
a_ = {
"pad": {"id": 0, "token": pad_token},
"eos": {"id": 1, "token": eos_token},
"unk": {"id": 2, "token": unk_token},
}
a_ = [None] * len(self.special_tokens)
for token_dict in self.special_tokens.values():
a_ = token_dict["token"]
a_ = Tokenizer(Unigram())
a_ = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(" {2,}") , " "),
normalizers.Lowercase(),
])
a_ = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase),
pre_tokenizers.Digits(individual_digits=__UpperCAmelCase),
pre_tokenizers.Punctuation(),
])
a_ = decoders.Metaspace(replacement=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase)
a_ = TemplateProcessing(
single=F'''$A {self.special_tokens["eos"]["token"]}''' , special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])] , )
a_ = {
"model": "SentencePieceUnigram",
"replacement": replacement,
"add_prefix_space": add_prefix_space,
}
super().__init__(__UpperCAmelCase , __UpperCAmelCase)
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = 80_00 , __UpperCAmelCase = True , ) ->Optional[Any]:
a_ = trainers.UnigramTrainer(
vocab_size=__UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=__UpperCAmelCase , )
if isinstance(__UpperCAmelCase , __UpperCAmelCase):
a_ = [files]
self._tokenizer.train(__UpperCAmelCase , trainer=__UpperCAmelCase)
self.add_unk_id()
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = 80_00 , __UpperCAmelCase = True , ) ->int:
a_ = trainers.UnigramTrainer(
vocab_size=__UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=__UpperCAmelCase , )
self._tokenizer.train_from_iterator(__UpperCAmelCase , trainer=__UpperCAmelCase)
self.add_unk_id()
def UpperCAmelCase__ ( self) ->Union[str, Any]:
a_ = json.loads(self._tokenizer.to_str())
a_ = self.special_tokens["unk"]["id"]
a_ = Tokenizer.from_str(json.dumps(__UpperCAmelCase))
| 303
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
def UpperCamelCase ( UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
if "resnet-50" in model_name:
a_ = ResNetConfig.from_pretrained("microsoft/resnet-50" )
elif "resnet-101" in model_name:
a_ = ResNetConfig.from_pretrained("microsoft/resnet-101" )
else:
raise ValueError("Model name should include either resnet50 or resnet101" )
a_ = DetrConfig(use_timm_backbone=UpperCAmelCase , backbone_config=UpperCAmelCase )
# set label attributes
a_ = "panoptic" in model_name
if is_panoptic:
a_ = 250
else:
a_ = 91
a_ = "huggingface/label-files"
a_ = "coco-detection-id2label.json"
a_ = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type="dataset" ) , "r" ) )
a_ = {int(UpperCAmelCase ): v for k, v in idalabel.items()}
a_ = idalabel
a_ = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def UpperCamelCase ( UpperCAmelCase ) ->List[str]:
"""simple docstring"""
a_ = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") )
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") )
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") )
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") )
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
F'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
F'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
] )
return rename_keys
def rename_key(state_dict, old, new):
    """Move state_dict[old] to state_dict[new]."""
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    """Split the fused in_proj weights of every attention layer into separate q/k/v projections."""
    prefix = ""
    if is_panoptic:
        prefix = "detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict; the hidden size is 256
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def prepare_img():
    """Download the standard COCO cats image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert an original DETR checkpoint from torch hub into the HuggingFace format."""
    config, is_panoptic = get_detr_config(model_name)
    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)
    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
# Upload model and image processor to the hub
logger.info("Uploading PyTorch model and image processor to the hub..." )
model.push_to_hub(F'''nielsr/{model_name}''' )
processor.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
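# Example invocation (illustrative only; the flags are the ones defined by the argparse setup
# above, and the script filename is a guess):
#   python convert_detr_to_pytorch.py --model_name detr-resnet-50 --pytorch_dump_folder_path ./detr-resnet-50-hf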
| 303
| 1
|
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)
model_dict = {'facebook/bart-base': BartForConditionalGeneration}
tokenizer_dict = {'facebook/bart-base': BartTokenizer}
def parse_args():
    """Parse the command-line arguments of the export script."""
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
parser.add_argument(
"--validation_file" , type=UpperCAmelCase , default=UpperCAmelCase , help="A csv or a json file containing the validation data." )
parser.add_argument(
"--max_length" , type=UpperCAmelCase , default=5 , help="The maximum total input sequence length after tokenization." , )
parser.add_argument(
"--num_beams" , type=UpperCAmelCase , default=UpperCAmelCase , help=(
"Number of beams to use for evaluation. This argument will be "
"passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
) , )
parser.add_argument(
"--model_name_or_path" , type=UpperCAmelCase , help="Path to pretrained model or model identifier from huggingface.co/models." , required=UpperCAmelCase , )
parser.add_argument(
"--config_name" , type=UpperCAmelCase , default=UpperCAmelCase , help="Pretrained config name or path if not the same as model_name" , )
parser.add_argument(
"--device" , type=UpperCAmelCase , default="cpu" , help="Device where the model will be run" , )
parser.add_argument("--output_file_path" , type=UpperCAmelCase , default=UpperCAmelCase , help="Where to store the final ONNX file." )
    args = parser.parse_args()
return args
def load_model_tokenizer(model_name, device="cpu"):
    """Load the HuggingFace model and tokenizer for the given checkpoint name."""
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)
    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0
    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    """Export the scripted BART + beam-search generator to ONNX and compare it against the PyTorch output."""
    model.eval()
    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))
    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1_024, return_tensors="pt").to(model.device)
        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )
        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )
        logger.info("Model exported to {}".format(onnx_file_path))
        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))
        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))
        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
logger.info("Model outputs from torch and ONNX Runtime are similar." )
logger.info("Success." )
def main():
    """Entry point: parse arguments, load the model and export it to ONNX."""
    args = parse_args()
    max_length = 5
    num_beams = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
    device = torch.device(args.device)
    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
    model.to(device)
    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"
    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
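# Example invocation (illustrative only; the flags are defined in parse_args above, and the
# script filename is a guess):
#   python run_onnx_exporter.py --model_name_or_path facebook/bart-base --device cpu --output_file_path bart.onnx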
| 303
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    """Build a FocalNetConfig (depths, focal levels/windows, embed dim, labels) from the model name."""
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim, depths=depths, focal_levels=focal_levels, focal_windows=focal_windows, use_conv_embed=use_conv_embed, id2label=id2label, label2id=label2id, use_post_layernorm=use_post_layernorm, use_layerscale=use_layerscale, )
    return config
def rename_key(name):
    """Map an original FocalNet parameter name to its HuggingFace counterpart."""
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Convert an original FocalNet checkpoint into the HuggingFace format and optionally push it to the hub."""
    model_name_to_url = {
"focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)
# verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True, size={"shortest_edge": 256}, resample=PILImageResampling.BILINEAR, do_center_crop=True, crop_size=224, do_normalize=True, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD, )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")
a_ = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])
    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print(F'''Pushing model and processor of {model_name} to the hub...''' )
model.push_to_hub(F'''{model_name}''' )
processor.push_to_hub(F'''{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
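# Example invocation (illustrative only; the flags come from the argparse setup above and the
# script filename is a guess):
#   python convert_focalnet_to_hf_format.py --model_name focalnet-tiny --pytorch_dump_folder_path ./focalnet-tiny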
| 303
| 1
|
"""simple docstring"""
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__ )
def UpperCamelCase ( *UpperCAmelCase , **UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
return AutoConfig.from_pretrained(*UpperCAmelCase , **UpperCAmelCase )
@add_start_docstrings(AutoTokenizer.__doc__ )
def UpperCamelCase ( *UpperCAmelCase , **UpperCAmelCase ) ->Tuple:
"""simple docstring"""
return AutoTokenizer.from_pretrained(*UpperCAmelCase , **UpperCAmelCase )
@add_start_docstrings(AutoModel.__doc__ )
def UpperCamelCase ( *UpperCAmelCase , **UpperCAmelCase ) ->Dict:
"""simple docstring"""
return AutoModel.from_pretrained(*UpperCAmelCase , **UpperCAmelCase )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def UpperCamelCase ( *UpperCAmelCase , **UpperCAmelCase ) ->Any:
"""simple docstring"""
return AutoModelForCausalLM.from_pretrained(*UpperCAmelCase , **UpperCAmelCase )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def UpperCamelCase ( *UpperCAmelCase , **UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
return AutoModelForMaskedLM.from_pretrained(*UpperCAmelCase , **UpperCAmelCase )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def UpperCamelCase ( *UpperCAmelCase , **UpperCAmelCase ) ->Optional[Any]:
"""simple docstring"""
return AutoModelForSequenceClassification.from_pretrained(*UpperCAmelCase , **UpperCAmelCase )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def UpperCamelCase ( *UpperCAmelCase , **UpperCAmelCase ) ->Optional[Any]:
"""simple docstring"""
return AutoModelForQuestionAnswering.from_pretrained(*UpperCAmelCase , **UpperCAmelCase )
| 303
|
"""simple docstring"""
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    """Compare two TensorProtos for equality while ignoring their names."""
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    """Replace every input of a node (and of its If/Loop subgraphs) called `name` with `new_name`."""
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    """Apply `_node_replace_input_with` to every node of a graph."""
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    """Drop duplicated initializers and rewire all references to point at the kept copy."""
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    """Remove duplicated initializers from an ONNX file, save an optimized copy and return its path."""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print("total reduced size: ", total_reduced_size / 1_024 / 1_024 / 1_024, "GB")
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)
    return new_model
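# Sketch of the intended usage (mirrors how the BART export script above calls this helper; the
# variable names here are illustrative, not part of the original file):
#   optimized_path = remove_dup_initializers(os.path.abspath("BART.onnx"))
#   # `optimized_path` points at the "optimized_" copy written next to the input file.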
| 303
| 1
|
"""simple docstring"""
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Return all k-element combinations of the numbers 1..n, in lexicographic order."""
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    """Recursively build combinations by appending increasing numbers until `level` reaches 0."""
    if level == 0:
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    """Print each combination on its own line."""
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
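    # Quick sanity check (not part of the original file): with n = 4 and k = 2 the script prints
    # the C(4, 2) = 6 combinations, one per line:
    #   1 2
    #   1 3
    #   1 4
    #   2 3
    #   2 4
    #   3 4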
| 303
|
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , __UpperCAmelCase = "▁" , __UpperCAmelCase = True , __UpperCAmelCase = "<unk>" , __UpperCAmelCase = "</s>" , __UpperCAmelCase = "<pad>" , ) ->str:
a_ = {
"pad": {"id": 0, "token": pad_token},
"eos": {"id": 1, "token": eos_token},
"unk": {"id": 2, "token": unk_token},
}
a_ = [None] * len(self.special_tokens)
for token_dict in self.special_tokens.values():
a_ = token_dict["token"]
a_ = Tokenizer(Unigram())
a_ = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(" {2,}") , " "),
normalizers.Lowercase(),
])
a_ = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase),
pre_tokenizers.Digits(individual_digits=__UpperCAmelCase),
pre_tokenizers.Punctuation(),
])
a_ = decoders.Metaspace(replacement=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase)
a_ = TemplateProcessing(
single=F'''$A {self.special_tokens["eos"]["token"]}''' , special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])] , )
a_ = {
"model": "SentencePieceUnigram",
"replacement": replacement,
"add_prefix_space": add_prefix_space,
}
super().__init__(__UpperCAmelCase , __UpperCAmelCase)
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = 80_00 , __UpperCAmelCase = True , ) ->Optional[Any]:
a_ = trainers.UnigramTrainer(
vocab_size=__UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=__UpperCAmelCase , )
if isinstance(__UpperCAmelCase , __UpperCAmelCase):
a_ = [files]
self._tokenizer.train(__UpperCAmelCase , trainer=__UpperCAmelCase)
self.add_unk_id()
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = 80_00 , __UpperCAmelCase = True , ) ->int:
a_ = trainers.UnigramTrainer(
vocab_size=__UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=__UpperCAmelCase , )
self._tokenizer.train_from_iterator(__UpperCAmelCase , trainer=__UpperCAmelCase)
self.add_unk_id()
def UpperCAmelCase__ ( self) ->Union[str, Any]:
a_ = json.loads(self._tokenizer.to_str())
a_ = self.special_tokens["unk"]["id"]
a_ = Tokenizer.from_str(json.dumps(__UpperCAmelCase))
| 303
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : Union[str, Any] = """roberta"""
def __init__( self , __UpperCAmelCase=5_02_65 , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase="absolute" , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , ) ->int:
super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase)
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = hidden_act
a_ = intermediate_size
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = initializer_range
a_ = layer_norm_eps
a_ = position_embedding_type
a_ = use_cache
a_ = classifier_dropout
class snake_case ( SCREAMING_SNAKE_CASE_ ):
@property
def UpperCAmelCase__ ( self) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
a_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
a_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
])
| 303
|
"""simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def test_hf_hub_url(repo_id, path, revision):
    """Check that hf_hub_url builds the expected dataset resolve URL."""
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
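# Illustrative expected value for one parametrization (derived from the assert above, not part of
# the original test): quote() percent-encodes the blanks in the filename, so
#   hf_hub_url(repo_id="org-name/dataset-name", path="filename with blanks.csv", revision="v2")
#   == "https://huggingface.co/datasets/org-name/dataset-name/resolve/v2/filename%20with%20blanks.csv"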
| 303
| 1
|
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self) ->Union[str, Any]:
a_ = tf.convert_to_tensor(
[
[
8.2_220_991, # 3rd highest value; idx. 0
-0.5_620_044,
5.23_229_752,
4.0_386_393,
-6.8_798_378,
-0.54_785_802,
-3.2_012_153,
2.92_777_176,
1.88_171_953,
7.35_341_276, # 5th highest value; idx. 9
8.43_207_833, # 2nd highest value; idx. 10
-9.85_711_836,
-5.96_209_236,
-1.13_039_161,
-7.1_115_294,
-0.8_369_633,
-5.3_186_408,
7.06_427_407,
0.81_369_344,
-0.82_023_817,
-5.9_179_796,
0.58_813_443,
-6.99_778_438,
4.71_551_189,
-0.18_771_637,
7.44_020_759, # 4th highest value; idx. 25
9.38_450_987, # 1st highest value; idx. 26
2.12_662_941,
-9.32_562_038,
2.35_652_522,
], # cummulative prob of 5 highest values <= 0.6
[
0.58_425_518,
4.53_139_238,
-5.57_510_464,
-6.28_030_699,
-7.19_529_503,
-4.02_122_551,
1.39_337_037,
-6.06_707_057,
1.59_480_517,
-9.643_119,
0.03_907_799,
0.67_231_762,
-8.88_206_726,
6.27_115_922, # 4th highest value; idx. 13
2.28_520_723,
4.82_767_506,
4.30_421_368,
8.8_275_313, # 2nd highest value; idx. 17
5.44_029_958, # 5th highest value; idx. 18
-4.4_735_794,
7.38_579_536, # 3rd highest value; idx. 20
-2.91_051_663,
2.61_946_077,
-2.5_674_762,
-9.48_959_302,
-4.02_922_645,
-1.35_416_918,
9.67_702_323, # 1st highest value; idx. 27
-5.89_478_553,
1.85_370_467,
], # cummulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
a_ = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
a_ = tf.convert_to_tensor(
[8.222_099, 7.3_534_126, 8.432_078, 7.4_402_075, 9.38_451, 6.271_159, 8.827_531, 5.4_402_995, 7.3_857_956, 9.677_023] , dtype=tf.floataa , ) # expected non filtered values as noted above
a_ = tf_top_k_top_p_filtering(__UpperCAmelCase , top_k=10 , top_p=0.6 , min_tokens_to_keep=4)
a_ = output[output != -float("inf")]
a_ = tf.cast(
tf.where(tf.not_equal(__UpperCAmelCase , tf.constant(-float("inf") , dtype=tf.floataa))) , dtype=tf.intaa , )
tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , rtol=1E-12)
tf.debugging.assert_equal(__UpperCAmelCase , __UpperCAmelCase)
@require_tf
class snake_case ( unittest.TestCase , SCREAMING_SNAKE_CASE_ ):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
a_ : Any = {
"""AutoModelForCausalLM""": TFAutoModelForCausalLM,
"""AutoModelForSpeechSeq2Seq""": TFAutoModelForSpeechSeqaSeq,
"""AutoModelForSeq2SeqLM""": TFAutoModelForSeqaSeqLM,
"""AutoModelForVision2Seq""": TFAutoModelForVisionaSeq,
"""LogitsProcessorList""": TFLogitsProcessorList,
"""MinLengthLogitsProcessor""": TFMinLengthLogitsProcessor,
"""create_tensor_fn""": tf.convert_to_tensor,
"""floats_tensor""": floats_tensor,
"""return_tensors""": """tf""",
}
@slow
def UpperCAmelCase__ ( self) ->List[Any]:
# TF-only test: tf.saved_model export
a_ = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
a_ = 2
a_ = 2
class snake_case ( tf.Module ):
def __init__( self , __UpperCAmelCase) ->Optional[int]:
super(__UpperCAmelCase , self).__init__()
a_ = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name="input_ids"),
tf.TensorSpec((None, input_length) , tf.intaa , name="attention_mask"),
) , jit_compile=__UpperCAmelCase , )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase) ->Any:
a_ = self.model.generate(
input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase , max_new_tokens=__UpperCAmelCase , return_dict_in_generate=__UpperCAmelCase , )
return {"sequences": outputs["sequences"]}
a_ = [[2, 0], [1_02, 1_03]]
a_ = [[1, 0], [1, 1]]
a_ = DummyModel(model=__UpperCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(__UpperCAmelCase , __UpperCAmelCase , signatures={"serving_default": dummy_model.serving})
a_ = tf.saved_model.load(__UpperCAmelCase).signatures["serving_default"]
for batch_size in range(1 , len(__UpperCAmelCase) + 1):
a_ = {
"input_ids": tf.constant(dummy_input_ids[:batch_size]),
"attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
}
a_ = serving_func(**__UpperCAmelCase)["sequences"]
a_ = test_model.generate(**__UpperCAmelCase , max_new_tokens=__UpperCAmelCase)
tf.debugging.assert_equal(__UpperCAmelCase , __UpperCAmelCase)
@slow
def UpperCAmelCase__ ( self) ->Optional[int]:
# TF-only test: tf.saved_model export
a_ = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
a_ = 1
a_ = 2
class snake_case ( tf.Module ):
def __init__( self , __UpperCAmelCase) ->int:
super(__UpperCAmelCase , self).__init__()
a_ = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name="input_ids"),
tf.TensorSpec((batch_size, None) , tf.intaa , name="attention_mask"),
) , jit_compile=__UpperCAmelCase , )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase) ->List[Any]:
a_ = self.model.generate(
input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase , max_new_tokens=__UpperCAmelCase , return_dict_in_generate=__UpperCAmelCase , )
return {"sequences": outputs["sequences"]}
a_ = [[2], [1_02, 1_03]]
a_ = [[1], [1, 1]]
a_ = DummyModel(model=__UpperCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(__UpperCAmelCase , __UpperCAmelCase , signatures={"serving_default": dummy_model.serving})
a_ = tf.saved_model.load(__UpperCAmelCase).signatures["serving_default"]
for input_row in range(len(__UpperCAmelCase)):
a_ = {
"input_ids": tf.constant([dummy_input_ids[input_row]]),
"attention_mask": tf.constant([dummy_attention_masks[input_row]]),
}
a_ = serving_func(**__UpperCAmelCase)["sequences"]
a_ = test_model.generate(**__UpperCAmelCase , max_new_tokens=__UpperCAmelCase)
tf.debugging.assert_equal(__UpperCAmelCase , __UpperCAmelCase)
@slow
@require_tensorflow_text
def UpperCAmelCase__ ( self) ->List[Any]:
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id="google/flan-t5-small" , filename="spiece.model" , local_dir=__UpperCAmelCase)
class snake_case ( tf.keras.layers.Layer ):
def __init__( self) ->str:
super().__init__()
a_ = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(__UpperCAmelCase , "spiece.model") , "rb").read())
a_ = TFAutoModelForSeqaSeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
def UpperCAmelCase__ ( self , __UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase) ->Optional[int]:
a_ = self.tokenizer.tokenize(__UpperCAmelCase)
a_ , a_ = text.pad_model_inputs(
__UpperCAmelCase , max_seq_length=64 , pad_value=self.model.config.pad_token_id)
a_ = self.model.generate(input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase)
return self.tokenizer.detokenize(__UpperCAmelCase)
a_ = CompleteSentenceTransformer()
a_ = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="inputs")
a_ = complete_model(__UpperCAmelCase)
a_ = tf.keras.Model(__UpperCAmelCase , __UpperCAmelCase)
keras_model.save(__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Optional[int]:
# Has PT equivalent: this test relies on random sampling
a_ = {
"do_sample": True,
"num_beams": 1,
"top_p": 0.7,
"top_k": 10,
"temperature": 0.7,
}
a_ = 14
a_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
a_ = "Hello, my dog is cute and"
a_ = tokenizer(__UpperCAmelCase , return_tensors="tf")
a_ = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
a_ = 6_38
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(":/CPU:0"):
tf.random.set_seed(0)
a_ = model.generate(**__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase)
self.assertTrue(expectation == len(generated_tokens[0]))
a_ = [6_38, 1_98]
with tf.device(":/CPU:0"):
tf.random.set_seed(0)
a_ = model.generate(**__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase)
self.assertTrue(expectation == len(generated_tokens[0]))
def UpperCAmelCase__ ( self) ->Union[str, Any]:
# Has PT equivalent: ample use of framework-specific code
a_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
a_ = "Hugging Face is a technology company based in New York and Paris."
a_ = bart_tokenizer(__UpperCAmelCase , return_tensors="tf").input_ids
a_ = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
a_ = bart_model.generate(__UpperCAmelCase).numpy()
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase=None , **__UpperCAmelCase) ->List[Any]:
return super().call(__UpperCAmelCase , **__UpperCAmelCase)
a_ = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
a_ = bart_model.generate(__UpperCAmelCase , foo="bar").numpy()
self.assertTrue(np.array_equal(__UpperCAmelCase , __UpperCAmelCase))
class snake_case ( bart_model.model.encoder.__class__ ):
def UpperCAmelCase__ ( self , __UpperCAmelCase , **__UpperCAmelCase) ->Optional[int]:
return super().call(__UpperCAmelCase , **__UpperCAmelCase)
a_ = FakeEncoder(bart_model.config , bart_model.model.shared)
a_ = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
a_ = bart_model.generate(__UpperCAmelCase).numpy()
with self.assertRaises(__UpperCAmelCase):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(__UpperCAmelCase , foo="bar")
| 303
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : Tuple = """audio-spectrogram-transformer"""
def __init__( self , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=16 , __UpperCAmelCase=True , __UpperCAmelCase=10 , __UpperCAmelCase=10 , __UpperCAmelCase=10_24 , __UpperCAmelCase=1_28 , **__UpperCAmelCase , ) ->str:
super().__init__(**__UpperCAmelCase)
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = initializer_range
a_ = layer_norm_eps
a_ = patch_size
a_ = qkv_bias
a_ = frequency_stride
a_ = time_stride
a_ = max_length
a_ = num_mel_bins
| 303
| 1
|
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 303
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
),
}
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : str = """xlm-roberta"""
def __init__( self , __UpperCAmelCase=3_05_22 , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase="absolute" , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , ) ->Union[str, Any]:
super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase)
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = hidden_act
a_ = intermediate_size
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = initializer_range
a_ = layer_norm_eps
a_ = position_embedding_type
a_ = use_cache
a_ = classifier_dropout
class snake_case ( SCREAMING_SNAKE_CASE_ ):
@property
def UpperCAmelCase__ ( self) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
a_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
a_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
])
| 303
| 1
|
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Optional[Any]:
a_ = 3
a_ = 2_50
a_ = ids_tensor((batch_size, length) , __UpperCAmelCase)
a_ = torch.ones((batch_size, length) , device=__UpperCAmelCase , dtype=torch.float) / length
return input_ids, scores
def UpperCAmelCase__ ( self) ->Optional[Any]:
a_ , a_ = self._get_tensors(5)
a_ = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10),
MaxTimeCriteria(max_time=0.1),
])
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase))
a_ , a_ = self._get_tensors(9)
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase))
a_ , a_ = self._get_tensors(10)
self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase))
def UpperCAmelCase__ ( self) ->int:
a_ = MaxLengthCriteria(max_length=10)
a_ , a_ = self._get_tensors(5)
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase))
a_ , a_ = self._get_tensors(9)
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase))
a_ , a_ = self._get_tensors(10)
self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase))
def UpperCAmelCase__ ( self) ->Tuple:
a_ = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5)
a_ , a_ = self._get_tensors(5)
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase))
a_ , a_ = self._get_tensors(9)
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase))
a_ , a_ = self._get_tensors(10)
self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase))
a_ = StoppingCriteriaList([criteria])
self.assertEqual(criteria_list.max_length , 10)
def UpperCAmelCase__ ( self) ->List[str]:
a_ , a_ = self._get_tensors(5)
a_ = MaxTimeCriteria(max_time=0.1)
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase))
a_ = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2)
self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase))
def UpperCAmelCase__ ( self) ->Any:
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]) , 10)
with self.assertWarns(__UpperCAmelCase):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]) , 11)
a_ = validate_stopping_criteria(StoppingCriteriaList() , 11)
self.assertEqual(len(__UpperCAmelCase) , 1)
| 303
|
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=24 , __UpperCAmelCase=2 , __UpperCAmelCase=6 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=None , __UpperCAmelCase=10_00 , ) ->List[str]:
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_mask
a_ = use_token_type_ids
a_ = use_labels
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = type_sequence_label_size
a_ = initializer_range
a_ = num_labels
a_ = scope
a_ = range_bbox
def UpperCAmelCase__ ( self) ->int:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox)
# Ensure that bbox is legal
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
a_ = bbox[i, j, 3]
a_ = bbox[i, j, 1]
a_ = t
if bbox[i, j, 2] < bbox[i, j, 0]:
a_ = bbox[i, j, 2]
a_ = bbox[i, j, 0]
a_ = t
a_ = None
if self.use_input_mask:
a_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
a_ = None
if self.use_token_type_ids:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a_ = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCAmelCase__ ( self) ->List[str]:
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Any:
a_ = LiltModel(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase)
a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase , token_type_ids=__UpperCAmelCase)
a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Union[str, Any]:
a_ = self.num_labels
a_ = LiltForTokenClassification(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(
__UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Dict:
a_ = LiltForQuestionAnswering(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(
__UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase__ ( self) ->str:
config_and_inputs = self.prepare_config_and_inputs()
(
    config,
    input_ids,
    bbox,
    token_type_ids,
    input_mask,
    sequence_labels,
    token_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"bbox": bbox,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : List[Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
a_ : List[str] = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ : Any = False
a_ : Dict = False
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->int:
return True
def UpperCAmelCase__ ( self) ->str:
a_ = LiltModelTester(self)
a_ = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37)
def UpperCAmelCase__ ( self) ->List[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self) ->Tuple:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Dict:
a_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a_ = type
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[str]:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->str:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase)
@slow
def UpperCAmelCase__ ( self) ->List[Any]:
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = LiltModel.from_pretrained(__UpperCAmelCase)
self.assertIsNotNone(__UpperCAmelCase)
@require_torch
@slow
class snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(__UpperCAmelCase)
a_ = torch.tensor([[1, 2]] , device=__UpperCAmelCase)
a_ = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__UpperCAmelCase)
# forward pass
with torch.no_grad():
a_ = model(input_ids=__UpperCAmelCase , bbox=__UpperCAmelCase)
a_ = torch.Size([1, 2, 7_68])
a_ = torch.tensor(
[[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]] , device=__UpperCAmelCase , )
self.assertEqual(outputs.last_hidden_state.shape , __UpperCAmelCase)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __UpperCAmelCase , atol=1E-3))
| 303
| 1
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
a_ : Any = ["""torch""", """scipy"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase) ->Any:
requires_backends(self , ["torch", "scipy"])
@classmethod
def UpperCAmelCase__ ( cls , *__UpperCAmelCase , **__UpperCAmelCase) ->str:
requires_backends(cls , ["torch", "scipy"])
@classmethod
def UpperCAmelCase__ ( cls , *__UpperCAmelCase , **__UpperCAmelCase) ->Optional[Any]:
requires_backends(cls , ["torch", "scipy"])
| 303
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def UpperCAmelCase__ ( self) ->Any:
a_ = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(__UpperCAmelCase , "embed_dim"))
self.parent.assertTrue(hasattr(__UpperCAmelCase , "num_heads"))
class snake_case :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=64 , __UpperCAmelCase=3 , __UpperCAmelCase=[16, 48, 96] , __UpperCAmelCase=[1, 3, 6] , __UpperCAmelCase=[1, 2, 10] , __UpperCAmelCase=[7, 3, 3] , __UpperCAmelCase=[4, 2, 2] , __UpperCAmelCase=[2, 1, 1] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[False, False, True] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=2 , ) ->Optional[int]:
a_ = parent
a_ = batch_size
a_ = image_size
a_ = patch_sizes
a_ = patch_stride
a_ = patch_padding
a_ = is_training
a_ = use_labels
a_ = num_labels
a_ = num_channels
a_ = embed_dim
a_ = num_heads
a_ = stride_kv
a_ = depth
a_ = cls_token
a_ = attention_drop_rate
a_ = initializer_range
a_ = layer_norm_eps
def UpperCAmelCase__ ( self) ->Any:
a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a_ = None
if self.use_labels:
# create a random int32 tensor of given shape
a_ = ids_tensor([self.batch_size] , self.num_labels)
a_ = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self) ->Union[str, Any]:
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Optional[Any]:
a_ = TFCvtModel(config=__UpperCAmelCase)
a_ = model(__UpperCAmelCase , training=__UpperCAmelCase)
a_ = (self.image_size, self.image_size)
a_ , a_ = image_size[0], image_size[1]
for i in range(len(self.depth)):
a_ = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
a_ = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->str:
a_ = self.num_labels
a_ = TFCvtForImageClassification(__UpperCAmelCase)
a_ = model(__UpperCAmelCase , labels=__UpperCAmelCase , training=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase__ ( self) ->Tuple:
a_ = self.prepare_config_and_inputs()
a_ , a_ , a_ = config_and_inputs
a_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : Union[str, Any] = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
a_ : List[Any] = (
{"""feature-extraction""": TFCvtModel, """image-classification""": TFCvtForImageClassification}
if is_tf_available()
else {}
)
a_ : Any = False
a_ : Dict = False
a_ : Optional[int] = False
a_ : List[Any] = False
a_ : List[Any] = False
def UpperCAmelCase__ ( self) ->List[str]:
a_ = TFCvtModelTester(self)
a_ = TFCvtConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37)
def UpperCAmelCase__ ( self) ->List[str]:
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="Cvt does not output attentions")
def UpperCAmelCase__ ( self) ->Dict:
pass
@unittest.skip(reason="Cvt does not use inputs_embeds")
def UpperCAmelCase__ ( self) ->List[str]:
pass
@unittest.skip(reason="Cvt does not support input and output embeddings")
def UpperCAmelCase__ ( self) ->Optional[Any]:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
def UpperCAmelCase__ ( self) ->Dict:
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def UpperCAmelCase__ ( self) ->List[str]:
super().test_keras_fit()
@unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8")
def UpperCAmelCase__ ( self) ->Dict:
a_ = tf.keras.mixed_precision.Policy("mixed_float16")
tf.keras.mixed_precision.set_global_policy(__UpperCAmelCase)
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("float32")
def UpperCAmelCase__ ( self) ->Optional[int]:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(__UpperCAmelCase)
a_ = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ = [*signature.parameters.keys()]
a_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Optional[int]:
def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase):
a_ = model_class(__UpperCAmelCase)
a_ = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase))
a_ = outputs.hidden_states
a_ = len(self.model_tester.depth)
self.assertEqual(len(__UpperCAmelCase) , __UpperCAmelCase)
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:]) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a_ = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Dict:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[str]:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase)
@slow
def UpperCAmelCase__ ( self) ->str:
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = TFCvtModel.from_pretrained(__UpperCAmelCase)
self.assertIsNotNone(__UpperCAmelCase)
def UpperCamelCase ( ) ->Dict:
"""simple docstring"""
a_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class snake_case ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self) ->int:
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
@slow
def UpperCAmelCase__ ( self) ->Any:
a_ = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
a_ = self.default_image_processor
a_ = prepare_img()
a_ = image_processor(images=__UpperCAmelCase , return_tensors="tf")
# forward pass
a_ = model(**__UpperCAmelCase)
# verify the logits
a_ = tf.TensorShape((1, 10_00))
self.assertEqual(outputs.logits.shape , __UpperCAmelCase)
a_ = tf.constant([0.9_285, 0.9_015, -0.3_150])
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __UpperCAmelCase , atol=1E-4))
| 303
| 1
|
"""simple docstring"""
UpperCamelCase_ = 0 # The first color of the flag.
UpperCamelCase_ = 1 # The second color of the flag.
UpperCamelCase_ = 2 # The third color of the flag.
UpperCamelCase_ = (red, white, blue)
def UpperCamelCase ( UpperCAmelCase ) ->list:
"""simple docstring"""
if not sequence:
return []
if len(UpperCAmelCase ) == 1:
return list(UpperCAmelCase )
a_ = 0
a_ = len(UpperCAmelCase ) - 1
a_ = 0
while mid <= high:
if sequence[mid] == colors[0]:
a_ , a_ = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
a_ , a_ = sequence[high], sequence[mid]
high -= 1
else:
a_ = F'''The elements inside the sequence must contain only {colors} values'''
raise ValueError(UpperCAmelCase )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase_ = input('Enter numbers separated by commas:\n').strip()
UpperCamelCase_ = [int(item.strip()) for item in user_input.split(',')]
print(F"""{dutch_national_flag_sort(unsorted)}""")
| 303
|
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : Dict = """Speech2TextFeatureExtractor"""
a_ : str = """Speech2TextTokenizer"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase) ->List[str]:
super().__init__(__UpperCAmelCase , __UpperCAmelCase)
a_ = self.feature_extractor
a_ = False
def __call__( self , *__UpperCAmelCase , **__UpperCAmelCase) ->Optional[int]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__UpperCAmelCase , **__UpperCAmelCase)
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
a_ = kwargs.pop("raw_speech")
else:
a_ = kwargs.pop("audio" , __UpperCAmelCase)
a_ = kwargs.pop("sampling_rate" , __UpperCAmelCase)
a_ = kwargs.pop("text" , __UpperCAmelCase)
if len(__UpperCAmelCase) > 0:
a_ = args[0]
a_ = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process.")
if audio is not None:
a_ = self.feature_extractor(__UpperCAmelCase , *__UpperCAmelCase , sampling_rate=__UpperCAmelCase , **__UpperCAmelCase)
if text is not None:
a_ = self.tokenizer(__UpperCAmelCase , **__UpperCAmelCase)
if text is None:
return inputs
elif audio is None:
return encodings
else:
a_ = encodings["input_ids"]
return inputs
def UpperCAmelCase__ ( self , *__UpperCAmelCase , **__UpperCAmelCase) ->str:
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase)
def UpperCAmelCase__ ( self , *__UpperCAmelCase , **__UpperCAmelCase) ->int:
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase)
@contextmanager
def UpperCAmelCase__ ( self) ->Tuple:
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call.")
a_ = True
a_ = self.tokenizer
yield
a_ = self.feature_extractor
a_ = False
| 303
| 1
|
"""simple docstring"""
import math
import os
import sys
def UpperCamelCase ( UpperCAmelCase ) ->str:
"""simple docstring"""
a_ = ""
try:
with open(UpperCAmelCase , "rb" ) as binary_file:
a_ = binary_file.read()
for dat in data:
a_ = F'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print("File not accessible" )
sys.exit()
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->None:
"""simple docstring"""
lexicon.pop(UpperCAmelCase )
a_ = last_match_id
if math.loga(UpperCAmelCase ).is_integer():
for curr_key in lexicon:
a_ = "0" + lexicon[curr_key]
a_ = bin(UpperCAmelCase )[2:]
def UpperCamelCase ( UpperCAmelCase ) ->str:
"""simple docstring"""
a_ = {"0": "0", "1": "1"}
a_ , a_ = "", ""
a_ = len(UpperCAmelCase )
for i in range(len(UpperCAmelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
a_ = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
index += 1
a_ = ""
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
a_ = lexicon[curr_string]
result += last_match_id
return result
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->str:
"""simple docstring"""
a_ = os.path.getsize(UpperCAmelCase )
a_ = bin(UpperCAmelCase )[2:]
a_ = len(UpperCAmelCase )
return "0" * (length_length - 1) + file_length_binary + compressed
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->None:
"""simple docstring"""
a_ = 8
try:
with open(UpperCAmelCase , "wb" ) as opened_file:
a_ = [
to_write[i : i + byte_length]
for i in range(0 , len(UpperCAmelCase ) , UpperCAmelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("10000000" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(elem , 2 ).to_bytes(1 , byteorder="big" ) )
except OSError:
print("File not accessible" )
sys.exit()
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->None:
"""simple docstring"""
a_ = read_file_binary(UpperCAmelCase )
a_ = compress_data(UpperCAmelCase )
a_ = add_file_length(UpperCAmelCase , UpperCAmelCase )
write_file_binary(UpperCAmelCase , UpperCAmelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
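# --- Illustrative sketch (added for clarity; not part of the original module) ---
# The compressor above works on a string of "0"/"1" characters.  A minimal,
# self-contained illustration of the byte -> bit-string conversion done by the
# reader and of the 8-bit re-packing done by the writer; the helper names are
# hypothetical and the padding scheme shown here is a simplified variant.
def _bytes_to_bits_example(data: bytes) -> str:
    return "".join(f"{byte:08b}" for byte in data)

def _bits_to_bytes_example(bits: str) -> bytes:
    # pad with a single "1" followed by zeros so the payload end stays recoverable
    bits += "1" + "0" * ((8 - (len(bits) + 1) % 8) % 8)
    return bytes(int(bits[i : i + 8], 2) for i in range(0, len(bits), 8))

assert _bytes_to_bits_example(b"A") == "01000001"
assert _bits_to_bytes_example("0100000") == b"A"  # 7 payload bits + "1" padding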
| 303
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {
'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
'LiltForQuestionAnswering',
'LiltForSequenceClassification',
'LiltForTokenClassification',
'LiltModel',
'LiltPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 303
| 1
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE_ )
class snake_case ( SCREAMING_SNAKE_CASE_ ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
a_ : str = field(default="""summarization""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
a_ : ClassVar[Features] = Features({"""text""": Value("""string""" )} )
a_ : ClassVar[Features] = Features({"""summary""": Value("""string""" )} )
a_ : str = "text"
a_ : str = "summary"
@property
def UpperCAmelCase__ ( self) ->Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
| 303
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 303
| 1
|
"""simple docstring"""
def UpperCamelCase ( UpperCAmelCase ) ->int:
"""simple docstring"""
if not isinstance(UpperCAmelCase , UpperCAmelCase ) or number < 0:
raise ValueError("Input must be a non-negative integer" )
a_ = 0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
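# --- Illustrative sketch (added for clarity; not part of the original module) ---
# Brian Kernighan's trick: `n & (n - 1)` clears the lowest set bit, so the loop
# above runs once per set bit instead of once per bit position.  Worked example
# for 25 (0b11001): 25 -> 24 (0b11000) -> 16 (0b10000) -> 0, i.e. three
# iterations.  `_popcount_example` is a hypothetical helper name.
def _popcount_example(n: int) -> int:
    count = 0
    while n:
        n &= n - 1  # drop the lowest set bit
        count += 1
    return count

assert _popcount_example(25) == bin(25).count("1") == 3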
| 303
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCamelCase_ = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 303
| 1
|
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : str = MgpstrTokenizer
a_ : Any = False
a_ : Tuple = {}
a_ : str = False
def UpperCAmelCase__ ( self) ->Any:
super().setUp()
# fmt: off
a_ = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
a_ = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase))))
a_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as fp:
fp.write(json.dumps(__UpperCAmelCase) + "\n")
def UpperCAmelCase__ ( self , **__UpperCAmelCase) ->Optional[Any]:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase)
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Optional[int]:
a_ = "tester"
a_ = "tester"
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters.")
def UpperCAmelCase__ ( self) ->List[str]:
pass
def UpperCAmelCase__ ( self) ->Optional[Any]:
a_ = self.get_tokenizers(do_lower_case=__UpperCAmelCase)
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}'''):
a_ = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token})
a_ = tokenizer.encode([special_token] , add_special_tokens=__UpperCAmelCase)
self.assertEqual(len(__UpperCAmelCase) , 1)
a_ = tokenizer.decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase)
self.assertTrue(special_token not in decoded)
def UpperCAmelCase__ ( self) ->Dict:
a_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}'''):
a_ , a_ = self.get_input_output_texts(__UpperCAmelCase)
a_ = tokenizer.tokenize(__UpperCAmelCase)
a_ = tokenizer.convert_tokens_to_ids(__UpperCAmelCase)
a_ = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase)
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
a_ = tokenizer.convert_ids_to_tokens(__UpperCAmelCase)
self.assertNotEqual(len(__UpperCAmelCase) , 0)
a_ = tokenizer.decode(__UpperCAmelCase)
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase)
self.assertEqual(text_a.replace(" " , "") , __UpperCAmelCase)
@unittest.skip("MGP-STR tokenizer only handles one sequence.")
def UpperCAmelCase__ ( self) ->List[Any]:
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
def UpperCAmelCase__ ( self) ->int:
pass
| 303
|
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
a_ = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
# BLEU scores as follows:
# "pair": [fairseq, transformers]
a_ = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
a_ = F'''{src_lang}-{tgt_lang}'''
a_ = F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
model_card_dir.mkdir(parents=UpperCAmelCase , exist_ok=UpperCAmelCase )
a_ = os.path.join(UpperCAmelCase , "README.md" )
print(F'''Generating {path}''' )
with open(UpperCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(UpperCAmelCase )
# make sure we are under the root of the project
UpperCamelCase_ = Path(__file__).resolve().parent.parent.parent
UpperCamelCase_ = repo_dir / 'model_cards'
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
UpperCamelCase_ = model_cards_dir / 'allenai' / model_name
write_model_card(model_card_dir, src_lang='en', tgt_lang='de', model_name=model_name)
| 303
| 1
|
"""simple docstring"""
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
UpperCamelCase_ = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
UpperCamelCase_ = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class snake_case :
def __init__( self) ->Optional[int]:
a_ = WATERMARK_BITS
a_ = WatermarkEncoder()
self.encoder.set_watermark("bits" , self.watermark)
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Optional[int]:
# can't encode images that are smaller than 256
if images.shape[-1] < 2_56:
return images
a_ = (2_55 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1).float().numpy()
a_ = [self.encoder.encode(__UpperCAmelCase , "dwtDct") for image in images]
a_ = torch.from_numpy(np.array(__UpperCAmelCase)).permute(0 , 3 , 1 , 2)
a_ = torch.clamp(2 * (images / 2_55 - 0.5) , min=-1.0 , max=1.0)
return images
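# --- Illustrative sketch (added for clarity; not part of the original module) ---
# The watermarker above round-trips images between the [-1, 1] float range used
# by the pipeline and the [0, 255] range expected by the encoder.  A minimal
# check of that rescaling on a plain tensor (values are illustrative):
import torch

_x = torch.tensor([-1.0, 0.0, 1.0])
_as_pixel_range = 255 * (_x / 2 + 0.5)      # -> [0.0, 127.5, 255.0]
_back = 2 * (_as_pixel_range / 255 - 0.5)   # -> [-1.0, 0.0, 1.0]
assert torch.allclose(_back, _x)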
| 303
|
"""simple docstring"""
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(UpperCAmelCase , n - 1 , UpperCAmelCase ) * a) % mod
else:
a_ = binary_exponentiation(UpperCAmelCase , n / 2 , UpperCAmelCase )
return (b * b) % mod
# a prime number
UpperCamelCase_ = 701
UpperCamelCase_ = 1000000000
UpperCamelCase_ = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
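# --- Illustrative sketch (added for clarity; not part of the original script) ---
# The prints above rely on Fermat's little theorem: for a prime p and b not
# divisible by p, b**(p - 2) % p is the modular inverse of b, so
# (a / b) % p == (a * b**(p - 2)) % p.  A self-contained square-and-multiply
# check (`_mod_pow_example` is a hypothetical helper name):
def _mod_pow_example(base: int, exp: int, mod: int) -> int:
    result = 1
    base %= mod
    while exp:
        if exp & 1:
            result = result * base % mod
        base = base * base % mod
        exp >>= 1
    return result

_p = 701
assert _mod_pow_example(10, _p - 2, _p) == pow(10, _p - 2, _p)
assert (10 * _mod_pow_example(10, _p - 2, _p)) % _p == 1  # modular inverse of 10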
| 303
| 1
|
"""simple docstring"""
from timeit import timeit
def UpperCamelCase ( UpperCAmelCase ) ->int:
"""simple docstring"""
if number < 0:
raise ValueError("the value of input must not be negative" )
a_ = 0
while number:
number &= number - 1
result += 1
return result
def UpperCamelCase ( UpperCAmelCase ) ->int:
"""simple docstring"""
if number < 0:
raise ValueError("the value of input must not be negative" )
a_ = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def UpperCamelCase ( ) ->None:
"""simple docstring"""
def do_benchmark(UpperCAmelCase ) -> None:
a_ = "import __main__ as z"
print(F'''Benchmark when {number = }:''' )
print(F'''{get_set_bits_count_using_modulo_operator(UpperCAmelCase ) = }''' )
a_ = timeit("z.get_set_bits_count_using_modulo_operator(25)" , setup=UpperCAmelCase )
print(F'''timeit() runs in {timing} seconds''' )
print(F'''{get_set_bits_count_using_brian_kernighans_algorithm(UpperCAmelCase ) = }''' )
a_ = timeit(
"z.get_set_bits_count_using_brian_kernighans_algorithm(25)" , setup=UpperCAmelCase , )
print(F'''timeit() runs in {timing} seconds''' )
for number in (25, 37, 58, 0):
do_benchmark(UpperCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 303
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase) ->None:
warnings.warn(
"The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use YolosImageProcessor instead." , __UpperCAmelCase , )
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase)
| 303
| 1
|
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class snake_case ( unittest.TestCase ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=7 , __UpperCAmelCase=3 , __UpperCAmelCase=30 , __UpperCAmelCase=4_00 , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=[0.5, 0.5, 0.5] , __UpperCAmelCase=[0.5, 0.5, 0.5] , __UpperCAmelCase=True , __UpperCAmelCase=1 / 2_55 , __UpperCAmelCase=True , ) ->List[str]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
a_ = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
a_ = parent
a_ = batch_size
a_ = num_channels
a_ = min_resolution
a_ = max_resolution
a_ = do_resize
a_ = size
a_ = do_normalize
a_ = image_mean
a_ = image_std
a_ = do_rescale
a_ = rescale_factor
a_ = do_pad
def UpperCAmelCase__ ( self) ->str:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase=False) ->Dict:
if not batched:
a_ = image_inputs[0]
if isinstance(__UpperCAmelCase , Image.Image):
a_ , a_ = image.size
else:
a_ , a_ = image.shape[1], image.shape[2]
if w < h:
a_ = int(self.size["shortest_edge"] * h / w)
a_ = self.size["shortest_edge"]
elif w > h:
a_ = self.size["shortest_edge"]
a_ = int(self.size["shortest_edge"] * w / h)
else:
a_ = self.size["shortest_edge"]
a_ = self.size["shortest_edge"]
else:
a_ = []
for image in image_inputs:
a_ , a_ = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
a_ = max(__UpperCAmelCase , key=lambda __UpperCAmelCase: item[0])[0]
a_ = max(__UpperCAmelCase , key=lambda __UpperCAmelCase: item[1])[1]
return expected_height, expected_width
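# --- Illustrative note (added for clarity; not part of the original test) ---
# The helper above mirrors shortest-edge resizing: the smaller image side is
# scaled to size["shortest_edge"] and the other side is scaled by the same
# ratio.  For example, a width-30, height-400 input with shortest_edge = 18 is
# expected to come out as width 18, height int(18 * 400 / 30) == 240.
assert int(18 * 400 / 30) == 240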
@require_torch
@require_vision
class snake_case ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : Optional[Any] = DetaImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self) ->Any:
a_ = DetaImageProcessingTester(self)
@property
def UpperCAmelCase__ ( self) ->Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self) ->int:
a_ = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(__UpperCAmelCase , "image_mean"))
self.assertTrue(hasattr(__UpperCAmelCase , "image_std"))
self.assertTrue(hasattr(__UpperCAmelCase , "do_normalize"))
self.assertTrue(hasattr(__UpperCAmelCase , "do_resize"))
self.assertTrue(hasattr(__UpperCAmelCase , "do_rescale"))
self.assertTrue(hasattr(__UpperCAmelCase , "do_pad"))
self.assertTrue(hasattr(__UpperCAmelCase , "size"))
def UpperCAmelCase__ ( self) ->Dict:
a_ = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33})
self.assertEqual(image_processor.do_pad , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[str]:
pass
def UpperCAmelCase__ ( self) ->Dict:
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , Image.Image)
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
a_ , a_ = self.image_processor_tester.get_expected_values(__UpperCAmelCase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a_ , a_ = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase)
a_ = image_processing(__UpperCAmelCase , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ ( self) ->int:
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , np.ndarray)
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
a_ , a_ = self.image_processor_tester.get_expected_values(__UpperCAmelCase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a_ = image_processing(__UpperCAmelCase , return_tensors="pt").pixel_values
a_ , a_ = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ ( self) ->Optional[int]:
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , torch.Tensor)
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
a_ , a_ = self.image_processor_tester.get_expected_values(__UpperCAmelCase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a_ = image_processing(__UpperCAmelCase , return_tensors="pt").pixel_values
a_ , a_ = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def UpperCAmelCase__ ( self) ->str:
# prepare image and target
a_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r") as f:
a_ = json.loads(f.read())
a_ = {"image_id": 3_97_69, "annotations": target}
# encode them
a_ = DetaImageProcessor()
a_ = image_processing(images=__UpperCAmelCase , annotations=__UpperCAmelCase , return_tensors="pt")
# verify pixel values
a_ = torch.Size([1, 3, 8_00, 10_66])
self.assertEqual(encoding["pixel_values"].shape , __UpperCAmelCase)
a_ = torch.tensor([0.2_796, 0.3_138, 0.3_481])
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __UpperCAmelCase , atol=1E-4))
# verify area
a_ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438])
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __UpperCAmelCase))
# verify boxes
a_ = torch.Size([6, 4])
self.assertEqual(encoding["labels"][0]["boxes"].shape , __UpperCAmelCase)
a_ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215])
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __UpperCAmelCase , atol=1E-3))
# verify image_id
a_ = torch.tensor([3_97_69])
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __UpperCAmelCase))
# verify is_crowd
a_ = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __UpperCAmelCase))
# verify class_labels
a_ = torch.tensor([75, 75, 63, 65, 17, 17])
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __UpperCAmelCase))
# verify orig_size
a_ = torch.tensor([4_80, 6_40])
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __UpperCAmelCase))
# verify size
a_ = torch.tensor([8_00, 10_66])
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __UpperCAmelCase))
@slow
def UpperCAmelCase__ ( self) ->Tuple:
# prepare image, target and masks_path
a_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r") as f:
a_ = json.loads(f.read())
a_ = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
a_ = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
# encode them
a_ = DetaImageProcessor(format="coco_panoptic")
a_ = image_processing(images=__UpperCAmelCase , annotations=__UpperCAmelCase , masks_path=__UpperCAmelCase , return_tensors="pt")
# verify pixel values
a_ = torch.Size([1, 3, 8_00, 10_66])
self.assertEqual(encoding["pixel_values"].shape , __UpperCAmelCase)
a_ = torch.tensor([0.2_796, 0.3_138, 0.3_481])
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __UpperCAmelCase , atol=1E-4))
# verify area
a_ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147])
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __UpperCAmelCase))
# verify boxes
a_ = torch.Size([6, 4])
self.assertEqual(encoding["labels"][0]["boxes"].shape , __UpperCAmelCase)
a_ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625])
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __UpperCAmelCase , atol=1E-3))
# verify image_id
a_ = torch.tensor([3_97_69])
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __UpperCAmelCase))
# verify is_crowd
a_ = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __UpperCAmelCase))
# verify class_labels
a_ = torch.tensor([17, 17, 63, 75, 75, 93])
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __UpperCAmelCase))
# verify masks
a_ = 82_28_73
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __UpperCAmelCase)
# verify orig_size
a_ = torch.tensor([4_80, 6_40])
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __UpperCAmelCase))
# verify size
a_ = torch.tensor([8_00, 10_66])
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __UpperCAmelCase))
| 303
|
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self) ->Dict:
a_ = inspect.getfile(accelerate.test_utils)
a_ = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
a_ = os.path.sep.join(
mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"])
a_ = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])
@require_multi_gpu
def UpperCAmelCase__ ( self) ->Any:
print(F'''Found {torch.cuda.device_count()} devices.''')
a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy())
@require_multi_gpu
def UpperCAmelCase__ ( self) ->str:
print(F'''Found {torch.cuda.device_count()} devices.''')
a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
print(F'''Command: {cmd}''')
with patch_environment(omp_num_threads=1):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy())
@require_multi_gpu
def UpperCAmelCase__ ( self) ->Optional[int]:
a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__)]
with patch_environment(omp_num_threads=1):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy())
@require_multi_gpu
def UpperCAmelCase__ ( self) ->List[Any]:
print(F'''Found {torch.cuda.device_count()} devices, using 2 devices only''')
a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1"):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy())
if __name__ == "__main__":
UpperCamelCase_ = Accelerator()
UpperCamelCase_ = (accelerator.state.process_index + 2, 10)
UpperCamelCase_ = torch.randint(0, 10, shape).to(accelerator.device)
UpperCamelCase_ = ''
UpperCamelCase_ = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
UpperCamelCase_ = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
UpperCamelCase_ = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
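# --- Illustrative sketch (added for clarity; not part of the original script) ---
# pad_across_processes pads each process's tensor along dim 0 up to the largest
# size seen across processes, either at the end (default) or at the front
# (pad_first=True).  A single-process analogue of that padding with plain torch
# (all names here are illustrative):
import torch

_t = torch.arange(6).reshape(3, 2)
_rows = 5
_padded = torch.zeros((_rows, 2), dtype=_t.dtype)
_padded[: _t.shape[0]] = _t                        # default: pad at the end
_padded_first = torch.zeros((_rows, 2), dtype=_t.dtype)
_padded_first[_rows - _t.shape[0] :] = _t          # pad_first=True analogue
assert torch.equal(_padded[:3], _t) and torch.all(_padded[3:] == 0)
assert torch.equal(_padded_first[2:], _t) and torch.all(_padded_first[:2] == 0)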
| 303
| 1
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 303
|
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) ->tuple[float | int, list[tuple[int, int]]]:
"""simple docstring"""
a_ , a_ = grid.shape
a_ = [-1, 1, 0, 0]
a_ = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
a_ , a_ = [(0, source)], set()
a_ = np.full((rows, cols) , np.inf )
a_ = 0
a_ = np.empty((rows, cols) , dtype=UpperCAmelCase )
a_ = None
while queue:
dist , (x, y) = heappop(queue )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
a_ = []
while (x, y) != source:
path.append((x, y) )
a_ , a_ = predecessors[x, y]
path.append(UpperCAmelCase ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(UpperCAmelCase ) ):
a_ , a_ = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
a_ = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(UpperCAmelCase , (dist + 1, (nx, ny)) )
a_ = dist + 1
a_ = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
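# --- Illustrative sketch (added for clarity; not part of the original module) ---
# A compact, self-contained version of the same idea: Dijkstra over a grid with
# unit edge costs, where cells equal to 1 are passable.  The helper name
# `_grid_dijkstra_example` and the demo grid are illustrative only.
from heapq import heappop, heappush

def _grid_dijkstra_example(grid, source, destination):
    rows, cols = len(grid), len(grid[0])
    best = {source: 0}
    queue = [(0, source)]
    while queue:
        dist, (x, y) = heappop(queue)
        if (x, y) == destination:
            return dist
        for dx, dy in ((-1, 0), (1, 0), (0, -1), (0, 1)):
            nx, ny = x + dx, y + dy
            if 0 <= nx < rows and 0 <= ny < cols and grid[nx][ny] == 1:
                if dist + 1 < best.get((nx, ny), float("inf")):
                    best[(nx, ny)] = dist + 1
                    heappush(queue, (dist + 1, (nx, ny)))
    return float("inf")

_demo_grid = [[1, 1, 1], [0, 0, 1], [1, 1, 1]]
assert _grid_dijkstra_example(_demo_grid, (0, 0), (2, 0)) == 6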
| 303
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : Tuple = StableUnCLIPImgaImgPipeline
a_ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
a_ : str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
a_ : List[Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
a_ : str = frozenset([] )
def UpperCAmelCase__ ( self) ->Any:
a_ = 32
a_ = embedder_hidden_size
# image encoding components
a_ = CLIPImageProcessor(crop_size=32 , size=32)
torch.manual_seed(0)
a_ = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=__UpperCAmelCase , projection_dim=__UpperCAmelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ))
# regular denoising components
torch.manual_seed(0)
a_ = StableUnCLIPImageNormalizer(embedding_dim=__UpperCAmelCase)
a_ = DDPMScheduler(beta_schedule="squaredcos_cap_v2")
torch.manual_seed(0)
a_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
torch.manual_seed(0)
a_ = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__UpperCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ))
torch.manual_seed(0)
a_ = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__UpperCAmelCase , layers_per_block=1 , upcast_attention=__UpperCAmelCase , use_linear_projection=__UpperCAmelCase , )
torch.manual_seed(0)
a_ = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=__UpperCAmelCase , steps_offset=1 , )
torch.manual_seed(0)
a_ = AutoencoderKL()
a_ = {
# image encoding components
"feature_extractor": feature_extractor,
"image_encoder": image_encoder.eval(),
# image noising components
"image_normalizer": image_normalizer.eval(),
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder.eval(),
"unet": unet.eval(),
"scheduler": scheduler,
"vae": vae.eval(),
}
return components
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase=0 , __UpperCAmelCase=True) ->Union[str, Any]:
if str(__UpperCAmelCase).startswith("mps"):
a_ = torch.manual_seed(__UpperCAmelCase)
else:
a_ = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase)
a_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase)
if pil_image:
a_ = input_image * 0.5 + 0.5
a_ = input_image.clamp(0 , 1)
a_ = input_image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
a_ = DiffusionPipeline.numpy_to_pil(__UpperCAmelCase)[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def UpperCAmelCase__ ( self) ->int:
a_ = "cpu" # ensure determinism for the device-dependent torch.Generator
a_ = self.get_dummy_components()
a_ = StableUnCLIPImgaImgPipeline(**__UpperCAmelCase)
a_ = sd_pipe.to(__UpperCAmelCase)
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase)
a_ = self.get_dummy_inputs(__UpperCAmelCase)
inputs.update({"image_embeds": None})
a_ = sd_pipe(**__UpperCAmelCase).images
a_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
a_ = np.array([0.3_872, 0.7_224, 0.5_601, 0.4_741, 0.6_872, 0.5_814, 0.4_636, 0.3_867, 0.5_078])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCAmelCase__ ( self) ->Optional[int]:
a_ = torch_device in ["cpu", "mps"]
self._test_attention_slicing_forward_pass(test_max_difference=__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Any:
a_ = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=__UpperCAmelCase)
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def UpperCAmelCase__ ( self) ->Dict:
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=__UpperCAmelCase)
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self) ->Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self) ->Union[str, Any]:
a_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png")
a_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy")
a_ = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa)
pipe.to(__UpperCAmelCase)
pipe.set_progress_bar_config(disable=__UpperCAmelCase)
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a_ = torch.Generator(device="cpu").manual_seed(0)
        a_ = pipe(__UpperCAmelCase , "anime turtle" , generator=__UpperCAmelCase , output_type="np")
a_ = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Union[str, Any]:
a_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png")
a_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy")
a_ = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa)
pipe.to(__UpperCAmelCase)
pipe.set_progress_bar_config(disable=__UpperCAmelCase)
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a_ = torch.Generator(device="cpu").manual_seed(0)
        a_ = pipe(__UpperCAmelCase , "anime turtle" , generator=__UpperCAmelCase , output_type="np")
a_ = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Tuple:
a_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png")
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
a_ = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa)
a_ = pipe.to(__UpperCAmelCase)
pipe.set_progress_bar_config(disable=__UpperCAmelCase)
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a_ = pipe(
__UpperCAmelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , )
a_ = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
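# Hedged usage sketch (prompt, device and step count are illustrative; assumes the pipeline
# above corresponds to diffusers' StableUnCLIPImg2ImgPipeline): running the same public
# img2img checkpoint used by the integration tests above on a downloaded PIL image.
def _demo_stable_unclip_img2img():
    import torch
    from diffusers import StableUnCLIPImg2ImgPipeline
    from diffusers.utils import load_image
    demo_pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
        "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
    ).to("cuda")
    demo_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
    )
    # image first, then prompt, mirroring the integration test call above
    return demo_pipe(demo_image, "anime turtle", num_inference_steps=20, output_type="np").images[0]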
"""simple docstring"""
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
UpperCamelCase_ = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
UpperCamelCase_ = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class snake_case :
def __init__( self) ->Optional[int]:
a_ = WATERMARK_BITS
a_ = WatermarkEncoder()
self.encoder.set_watermark("bits" , self.watermark)
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Optional[int]:
# can't encode images that are smaller than 256
if images.shape[-1] < 2_56:
return images
a_ = (2_55 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1).float().numpy()
a_ = [self.encoder.encode(__UpperCAmelCase , "dwtDct") for image in images]
a_ = torch.from_numpy(np.array(__UpperCAmelCase)).permute(0 , 3 , 1 , 2)
a_ = torch.clamp(2 * (images / 2_55 - 0.5) , min=-1.0 , max=1.0)
return images
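# Minimal sketch (values are illustrative, not part of the watermarker above) of the
# underlying imwatermark calls used in this file: embed the same bit sequence into a
# single uint8 RGB image with the "dwtDct" method.
def _demo_watermark_encode():
    demo_encoder = WatermarkEncoder()
    demo_encoder.set_watermark("bits", WATERMARK_BITS)
    demo_image = np.zeros((256, 256, 3), dtype=np.uint8)  # images narrower than 256px are skipped above
    return demo_encoder.encode(demo_image, "dwtDct")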
"""simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : BigBirdConfig
a_ : jnp.dtype = jnp.floataa
a_ : bool = True
def UpperCAmelCase__ ( self) ->Optional[int]:
super().setup()
a_ = nn.Dense(5 , dtype=self.dtype)
def __call__( self , *__UpperCAmelCase , **__UpperCAmelCase) ->Optional[int]:
a_ = super().__call__(*__UpperCAmelCase , **__UpperCAmelCase)
a_ = self.cls(outputs[2])
return outputs[:2] + (cls_out,)
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : List[str] = FlaxBigBirdForNaturalQuestionsModule
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Tuple:
"""simple docstring"""
def cross_entropy(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None ):
a_ = logits.shape[-1]
a_ = (labels[..., None] == jnp.arange(UpperCAmelCase )[None]).astype("f4" )
a_ = jax.nn.log_softmax(UpperCAmelCase , axis=-1 )
a_ = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
a_ = reduction(UpperCAmelCase )
return loss
a_ = partial(UpperCAmelCase , reduction=jnp.mean )
a_ = cross_entropy(UpperCAmelCase , UpperCAmelCase )
a_ = cross_entropy(UpperCAmelCase , UpperCAmelCase )
a_ = cross_entropy(UpperCAmelCase , UpperCAmelCase )
return (start_loss + end_loss + pooled_loss) / 3
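# Hedged numeric sketch (dummy tensors, not part of the training code): the same
# mean-reduced cross-entropy used above, evaluated on a tiny batch of logits.
def _demo_cross_entropy():
    logits = jnp.array([[2.0, 0.5, -1.0], [0.1, 1.5, 0.3]])
    labels = jnp.array([0, 1])
    one_hot = (labels[..., None] == jnp.arange(logits.shape[-1])[None]).astype("f4")
    log_probs = jax.nn.log_softmax(logits, axis=-1)
    return jnp.mean(-jnp.sum(one_hot * log_probs, axis=-1))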
@dataclass
class snake_case :
a_ : str = "google/bigbird-roberta-base"
a_ : int = 3000
a_ : int = 1_0500
a_ : int = 128
a_ : int = 3
a_ : int = 1
a_ : int = 5
# tx_args
a_ : float = 3e-5
a_ : float = 0.0
a_ : int = 2_0000
a_ : float = 0.0095
a_ : str = "bigbird-roberta-natural-questions"
a_ : str = "training-expt"
a_ : str = "data/nq-training.jsonl"
a_ : str = "data/nq-validation.jsonl"
def UpperCAmelCase__ ( self) ->Dict:
os.makedirs(self.base_dir , exist_ok=__UpperCAmelCase)
a_ = os.path.join(self.base_dir , self.save_dir)
a_ = self.batch_size_per_device * jax.device_count()
@dataclass
class snake_case :
a_ : int
a_ : int = 4096 # no dynamic padding on TPUs
def __call__( self , __UpperCAmelCase) ->List[Any]:
a_ = self.collate_fn(__UpperCAmelCase)
a_ = jax.tree_util.tree_map(__UpperCAmelCase , __UpperCAmelCase)
return batch
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->str:
a_ , a_ = self.fetch_inputs(features["input_ids"])
a_ = {
"input_ids": jnp.array(__UpperCAmelCase , dtype=jnp.intaa),
"attention_mask": jnp.array(__UpperCAmelCase , dtype=jnp.intaa),
"start_labels": jnp.array(features["start_token"] , dtype=jnp.intaa),
"end_labels": jnp.array(features["end_token"] , dtype=jnp.intaa),
"pooled_labels": jnp.array(features["category"] , dtype=jnp.intaa),
}
return batch
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->int:
a_ = [self._fetch_inputs(__UpperCAmelCase) for ids in input_ids]
return zip(*__UpperCAmelCase)
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Union[str, Any]:
a_ = [1 for _ in range(len(__UpperCAmelCase))]
while len(__UpperCAmelCase) < self.max_length:
input_ids.append(self.pad_id)
attention_mask.append(0)
return input_ids, attention_mask
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None ) ->List[str]:
"""simple docstring"""
if seed is not None:
a_ = dataset.shuffle(seed=UpperCAmelCase )
for i in range(len(UpperCAmelCase ) // batch_size ):
a_ = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(UpperCAmelCase )
@partial(jax.pmap , axis_name="batch" )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
def loss_fn(UpperCAmelCase ):
a_ = model_inputs.pop("start_labels" )
a_ = model_inputs.pop("end_labels" )
a_ = model_inputs.pop("pooled_labels" )
a_ = state.apply_fn(**UpperCAmelCase , params=UpperCAmelCase , dropout_rng=UpperCAmelCase , train=UpperCAmelCase )
a_ , a_ , a_ = outputs
return state.loss_fn(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , )
a_ , a_ = jax.random.split(UpperCAmelCase )
a_ = jax.value_and_grad(UpperCAmelCase )
a_ , a_ = grad_fn(state.params )
a_ = jax.lax.pmean({"loss": loss} , axis_name="batch" )
a_ = jax.lax.pmean(UpperCAmelCase , "batch" )
a_ = state.apply_gradients(grads=UpperCAmelCase )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name="batch" )
def UpperCamelCase ( UpperCAmelCase , **UpperCAmelCase ) ->List[str]:
"""simple docstring"""
a_ = model_inputs.pop("start_labels" )
a_ = model_inputs.pop("end_labels" )
a_ = model_inputs.pop("pooled_labels" )
a_ = state.apply_fn(**UpperCAmelCase , params=state.params , train=UpperCAmelCase )
a_ , a_ , a_ = outputs
a_ = state.loss_fn(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
a_ = jax.lax.pmean({"loss": loss} , axis_name="batch" )
return metrics
class snake_case ( train_state.TrainState ):
a_ : Callable = struct.field(pytree_node=SCREAMING_SNAKE_CASE_ )
@dataclass
class snake_case :
a_ : Args
a_ : Callable
a_ : Callable
a_ : Callable
a_ : Callable
a_ : wandb
a_ : Callable = None
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None) ->Tuple:
a_ = model.params
a_ = TrainState.create(
apply_fn=model.__call__ , params=__UpperCAmelCase , tx=__UpperCAmelCase , loss_fn=__UpperCAmelCase , )
if ckpt_dir is not None:
a_ , a_ , a_ , a_ , a_ = restore_checkpoint(__UpperCAmelCase , __UpperCAmelCase)
a_ = {
"lr": args.lr,
"init_lr": args.init_lr,
"warmup_steps": args.warmup_steps,
"num_train_steps": num_train_steps,
"weight_decay": args.weight_decay,
}
a_ , a_ = build_tx(**__UpperCAmelCase)
a_ = train_state.TrainState(
step=__UpperCAmelCase , apply_fn=model.__call__ , params=__UpperCAmelCase , tx=__UpperCAmelCase , opt_state=__UpperCAmelCase , )
a_ = args
a_ = data_collator
a_ = lr
a_ = params
a_ = jax_utils.replicate(__UpperCAmelCase)
return state
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Any:
a_ = self.args
a_ = len(__UpperCAmelCase) // args.batch_size
a_ = jax.random.PRNGKey(0)
a_ = jax.random.split(__UpperCAmelCase , jax.device_count())
for epoch in range(args.max_epochs):
a_ = jnp.array(0 , dtype=jnp.floataa)
a_ = get_batched_dataset(__UpperCAmelCase , args.batch_size , seed=__UpperCAmelCase)
a_ = 0
for batch in tqdm(__UpperCAmelCase , total=__UpperCAmelCase , desc=F'''Running EPOCH-{epoch}'''):
a_ = self.data_collator(__UpperCAmelCase)
a_ , a_ , a_ = self.train_step_fn(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase)
running_loss += jax_utils.unreplicate(metrics["loss"])
i += 1
if i % args.logging_steps == 0:
a_ = jax_utils.unreplicate(state.step)
a_ = running_loss.item() / i
a_ = self.scheduler_fn(state_step - 1)
a_ = self.evaluate(__UpperCAmelCase , __UpperCAmelCase)
a_ = {
"step": state_step.item(),
"eval_loss": eval_loss.item(),
"tr_loss": tr_loss,
"lr": lr.item(),
}
tqdm.write(str(__UpperCAmelCase))
self.logger.log(__UpperCAmelCase , commit=__UpperCAmelCase)
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + F'''-e{epoch}-s{i}''' , state=__UpperCAmelCase)
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase) ->List[str]:
a_ = get_batched_dataset(__UpperCAmelCase , self.args.batch_size)
a_ = len(__UpperCAmelCase) // self.args.batch_size
a_ = jnp.array(0 , dtype=jnp.floataa)
a_ = 0
for batch in tqdm(__UpperCAmelCase , total=__UpperCAmelCase , desc="Evaluating ... "):
a_ = self.data_collator(__UpperCAmelCase)
a_ = self.val_step_fn(__UpperCAmelCase , **__UpperCAmelCase)
running_loss += jax_utils.unreplicate(metrics["loss"])
i += 1
return running_loss / i
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase) ->Optional[Any]:
a_ = jax_utils.unreplicate(__UpperCAmelCase)
print(F'''SAVING CHECKPOINT IN {save_dir}''' , end=" ... ")
self.model_save_fn(__UpperCAmelCase , params=state.params)
with open(os.path.join(__UpperCAmelCase , "opt_state.msgpack") , "wb") as f:
f.write(to_bytes(state.opt_state))
joblib.dump(self.args , os.path.join(__UpperCAmelCase , "args.joblib"))
joblib.dump(self.data_collator , os.path.join(__UpperCAmelCase , "data_collator.joblib"))
with open(os.path.join(__UpperCAmelCase , "training_state.json") , "w") as f:
json.dump({"step": state.step.item()} , __UpperCAmelCase)
print("DONE")
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->Tuple:
"""simple docstring"""
print(F'''RESTORING CHECKPOINT FROM {save_dir}''' , end=" ... " )
with open(os.path.join(UpperCAmelCase , "flax_model.msgpack" ) , "rb" ) as f:
a_ = from_bytes(state.params , f.read() )
with open(os.path.join(UpperCAmelCase , "opt_state.msgpack" ) , "rb" ) as f:
a_ = from_bytes(state.opt_state , f.read() )
a_ = joblib.load(os.path.join(UpperCAmelCase , "args.joblib" ) )
a_ = joblib.load(os.path.join(UpperCAmelCase , "data_collator.joblib" ) )
with open(os.path.join(UpperCAmelCase , "training_state.json" ) , "r" ) as f:
a_ = json.load(UpperCAmelCase )
a_ = training_state["step"]
print("DONE" )
return params, opt_state, step, args, data_collator
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Optional[int]:
"""simple docstring"""
a_ = num_train_steps - warmup_steps
a_ = optax.linear_schedule(init_value=UpperCAmelCase , end_value=UpperCAmelCase , transition_steps=UpperCAmelCase )
a_ = optax.linear_schedule(init_value=UpperCAmelCase , end_value=1E-7 , transition_steps=UpperCAmelCase )
a_ = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
return lr
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
def weight_decay_mask(UpperCAmelCase ):
a_ = traverse_util.flatten_dict(UpperCAmelCase )
a_ = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
return traverse_util.unflatten_dict(UpperCAmelCase )
a_ = scheduler_fn(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
a_ = optax.adamw(learning_rate=UpperCAmelCase , weight_decay=UpperCAmelCase , mask=UpperCAmelCase )
return tx, lr
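# Hedged sketch with illustrative hyperparameters: building the same warmup +
# linear-decay schedule and AdamW optimizer directly with optax, as the helpers
# above do with values taken from the Args dataclass.
def _demo_build_tx():
    warmup = optax.linear_schedule(init_value=0.0, end_value=3e-5, transition_steps=2_000)
    decay = optax.linear_schedule(init_value=3e-5, end_value=1e-7, transition_steps=18_000)
    schedule = optax.join_schedules(schedules=[warmup, decay], boundaries=[2_000])
    return optax.adamw(learning_rate=schedule, weight_decay=0.0095), schedule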
"""simple docstring"""
import math
UpperCamelCase_ = 10
UpperCamelCase_ = 7
UpperCamelCase_ = BALLS_PER_COLOUR * NUM_COLOURS
def UpperCamelCase ( UpperCAmelCase = 20 ) ->str:
"""simple docstring"""
a_ = math.comb(UpperCAmelCase , UpperCAmelCase )
a_ = math.comb(NUM_BALLS - BALLS_PER_COLOUR , UpperCAmelCase )
a_ = NUM_COLOURS * (1 - missing_colour / total)
return F'''{result:.9f}'''
if __name__ == "__main__":
print(solution(20))
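# Hedged sanity check (illustrative, not part of the solution): estimate the same
# expectation by simulation with 7 colours and 10 balls per colour; for large
# `trials` the estimate approaches the closed-form value printed above (≈ 6.8187).
def _monte_carlo_estimate(num_picks=20, trials=50_000):
    import random
    urn = [colour for colour in range(7) for _ in range(10)]
    seen = 0
    for _ in range(trials):
        seen += len(set(random.sample(urn, num_picks)))
    return seen / trials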
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : Tuple = """vit_mae"""
def __init__( self , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=2_24 , __UpperCAmelCase=16 , __UpperCAmelCase=3 , __UpperCAmelCase=True , __UpperCAmelCase=16 , __UpperCAmelCase=5_12 , __UpperCAmelCase=8 , __UpperCAmelCase=20_48 , __UpperCAmelCase=0.75 , __UpperCAmelCase=False , **__UpperCAmelCase , ) ->int:
super().__init__(**__UpperCAmelCase)
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = initializer_range
a_ = layer_norm_eps
a_ = image_size
a_ = patch_size
a_ = num_channels
a_ = qkv_bias
a_ = decoder_num_attention_heads
a_ = decoder_hidden_size
a_ = decoder_num_hidden_layers
a_ = decoder_intermediate_size
a_ = mask_ratio
a_ = norm_pix_loss
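# Hedged usage sketch, assuming the class above corresponds to ViTMAEConfig in
# transformers: instantiate it with a custom mask ratio and read back two defaults.
def _demo_vit_mae_config():
    from transformers import ViTMAEConfig
    demo_config = ViTMAEConfig(mask_ratio=0.5)
    return demo_config.hidden_size, demo_config.decoder_hidden_size  # 768, 512 by default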
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
UpperCamelCase_ = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def UpperCamelCase ( UpperCAmelCase ) ->Optional[Any]:
"""simple docstring"""
for pegasus_name, hf_name in PATTERNS:
a_ = k.replace(UpperCAmelCase , UpperCAmelCase )
return k
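# Illustrative trace (hypothetical key name): the replacements above are applied in
# order, so for example
#   "encoder/ffn/dense_1/kernel"
#   -> "encoder.ffn.dense_1.kernel"   after ['/', '.']
#   -> "encoder.fc2.kernel"           after ['ffn.dense_1.', 'fc2.']
#   -> "encoder.fc2.weight"           after ['kernel', 'weight']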
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->PegasusForConditionalGeneration:
"""simple docstring"""
a_ = DEFAULTS.copy()
cfg_kwargs.update(UpperCAmelCase )
a_ = PegasusConfig(**UpperCAmelCase )
a_ = PegasusForConditionalGeneration(UpperCAmelCase )
a_ = torch_model.model.state_dict()
a_ = {}
for k, v in tf_weights.items():
a_ = rename_state_dict_key(UpperCAmelCase )
if new_k not in sd:
raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''' )
if "dense" in k or "proj" in new_k:
a_ = v.T
a_ = torch.tensor(UpperCAmelCase , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, F'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}'''
# make sure embedding.padding_idx is respected
a_ = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1] )
a_ = mapping["shared.weight"]
a_ = mapping["shared.weight"]
a_ = {k: torch.zeros_like(UpperCAmelCase ) for k, v in sd.items() if k.endswith("bias" ) and k not in mapping}
mapping.update(**UpperCAmelCase )
a_ , a_ = torch_model.model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase )
a_ = [
k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
]
assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], F'''no matches found for the following tf keys {extra}'''
return torch_model
def UpperCamelCase ( UpperCAmelCase="./ckpt/aeslc/model.ckpt-32000" ) ->Dict:
"""simple docstring"""
a_ = tf.train.list_variables(UpperCAmelCase )
a_ = {}
a_ = ["Adafactor", "global_step"]
for name, shape in tqdm(UpperCAmelCase , desc="converting tf checkpoint to dict" ):
a_ = any(pat in name for pat in ignore_name )
if skip_key:
continue
a_ = tf.train.load_variable(UpperCAmelCase , UpperCAmelCase )
a_ = array
return tf_weights
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
a_ = Path(UpperCAmelCase ).parent.name
a_ = task_specific_params[F'''summarization_{dataset}''']["max_position_embeddings"]
a_ = PegasusTokenizer.from_pretrained("sshleifer/pegasus" , model_max_length=UpperCAmelCase )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(UpperCAmelCase )
# convert model
a_ = get_tf_weights_as_numpy(UpperCAmelCase )
a_ = task_specific_params[F'''summarization_{dataset}''']
if dataset == "large":
a_ = task_specific_params
a_ = convert_pegasus(UpperCAmelCase , UpperCAmelCase )
torch_model.save_pretrained(UpperCAmelCase )
a_ = torch_model.state_dict()
sd.pop("model.decoder.embed_positions.weight" )
sd.pop("model.encoder.embed_positions.weight" )
torch.save(UpperCAmelCase , Path(UpperCAmelCase ) / "pytorch_model.bin" )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.')
UpperCamelCase_ = parser.parse_args()
if args.save_dir is None:
UpperCamelCase_ = Path(args.tf_ckpt_path).parent.name
UpperCamelCase_ = os.path.join('pegasus', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
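# Hedged usage sketch (the script filename is an assumption):
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc
# which converts the AESLC TF checkpoint and writes the PyTorch weights plus the
# resized tokenizer into ./pegasus/aeslc.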
"""simple docstring"""
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , ) ->Optional[int]:
"""simple docstring"""
if attention_mask is None:
a_ = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
a_ = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
a_ = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=UpperCAmelCase )
if decoder_head_mask is None:
a_ = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=UpperCAmelCase )
if cross_attn_head_mask is None:
a_ = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=UpperCAmelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class snake_case :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=99 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase="relu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=20 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , ) ->List[str]:
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_labels
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = encoder_layerdrop
a_ = decoder_layerdrop
a_ = max_position_embeddings
a_ = eos_token_id
a_ = pad_token_id
a_ = bos_token_id
def UpperCAmelCase__ ( self) ->str:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = self.eos_token_id # Eos Token
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
a_ = input_ids.clamp(self.pad_token_id + 1)
a_ = decoder_input_ids.clamp(self.pad_token_id + 1)
a_ = self.get_config()
a_ = prepare_mam_aaa_inputs_dict(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
return config, inputs_dict
def UpperCAmelCase__ ( self) ->str:
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def UpperCAmelCase__ ( self) ->List[Any]:
a_ , a_ = self.prepare_config_and_inputs()
return config, inputs_dict
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase) ->Optional[int]:
a_ = MaMaaaModel(config=__UpperCAmelCase).get_decoder().to(__UpperCAmelCase).eval()
a_ = inputs_dict["input_ids"]
a_ = inputs_dict["attention_mask"]
a_ = inputs_dict["head_mask"]
# first forward pass
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , head_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase)
a_ , a_ = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
a_ = ids_tensor((self.batch_size, 3) , config.vocab_size)
a_ = ids_tensor((self.batch_size, 3) , 2)
# append to next input_ids and
a_ = torch.cat([input_ids, next_tokens] , dim=-1)
a_ = torch.cat([attention_mask, next_attn_mask] , dim=-1)
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase)["last_hidden_state"]
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase)[
"last_hidden_state"
]
# select random slice
a_ = ids_tensor((1,) , output_from_past.shape[-1]).item()
a_ = output_from_no_past[:, -3:, random_slice_idx].detach()
a_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-2))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase) ->Dict:
a_ = MaMaaaModel(config=__UpperCAmelCase).to(__UpperCAmelCase).eval()
a_ = model(**__UpperCAmelCase)
a_ = outputs.encoder_last_hidden_state
a_ = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
a_ = model.get_encoder()
encoder.save_pretrained(__UpperCAmelCase)
a_ = MaMaaaEncoder.from_pretrained(__UpperCAmelCase).to(__UpperCAmelCase)
a_ = encoder(inputs_dict["input_ids"] , attention_mask=inputs_dict["attention_mask"])[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3)
with tempfile.TemporaryDirectory() as tmpdirname:
a_ = model.get_decoder()
decoder.save_pretrained(__UpperCAmelCase)
a_ = MaMaaaDecoder.from_pretrained(__UpperCAmelCase).to(__UpperCAmelCase)
a_ = decoder(
input_ids=inputs_dict["decoder_input_ids"] , attention_mask=inputs_dict["decoder_attention_mask"] , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=inputs_dict["attention_mask"] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3)
@require_torch
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : Union[str, Any] = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
a_ : List[str] = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
a_ : Optional[int] = (
{
"""conversational""": MaMaaaForConditionalGeneration,
"""feature-extraction""": MaMaaaModel,
"""summarization""": MaMaaaForConditionalGeneration,
"""text2text-generation""": MaMaaaForConditionalGeneration,
"""translation""": MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
a_ : Union[str, Any] = True
a_ : Optional[int] = True
a_ : Dict = False
a_ : Optional[int] = False
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->int:
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def UpperCAmelCase__ ( self) ->Dict:
a_ = MaMaaaModelTester(self)
a_ = ConfigTester(self , config_class=__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->int:
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self) ->List[Any]:
a_ , a_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
a_ = model_class(__UpperCAmelCase)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCAmelCase)
a_ , a_ = model_class.from_pretrained(__UpperCAmelCase , output_loading_info=__UpperCAmelCase)
self.assertEqual(info["missing_keys"] , [])
def UpperCAmelCase__ ( self) ->Tuple:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Optional[Any]:
a_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Union[str, Any]:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
a_ = model_class(__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = copy.deepcopy(self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase))
if not self.is_encoder_decoder:
a_ = inputs["input_ids"]
del inputs["input_ids"]
else:
a_ = inputs["input_ids"]
a_ = inputs.get("decoder_input_ids" , __UpperCAmelCase)
del inputs["input_ids"]
inputs.pop("decoder_input_ids" , __UpperCAmelCase)
a_ = model.get_input_embeddings()
if not self.is_encoder_decoder:
a_ = wte(__UpperCAmelCase)
else:
a_ = wte(__UpperCAmelCase)
a_ = wte(__UpperCAmelCase)
with torch.no_grad():
model(**__UpperCAmelCase)[0]
def UpperCAmelCase__ ( self) ->List[str]:
a_ , a_ = self.model_tester.prepare_config_and_inputs()
a_ = input_dict["input_ids"]
a_ = input_ids.ne(1).to(__UpperCAmelCase)
a_ = MaMaaaForConditionalGeneration(__UpperCAmelCase).eval().to(__UpperCAmelCase)
if torch_device == "cuda":
model.half()
model.generate(__UpperCAmelCase , attention_mask=__UpperCAmelCase)
model.generate(num_beams=4 , do_sample=__UpperCAmelCase , early_stopping=__UpperCAmelCase , num_return_sequences=3)
def UpperCamelCase ( UpperCAmelCase ) ->Tuple:
"""simple docstring"""
return torch.tensor(UpperCAmelCase , dtype=torch.long , device=UpperCAmelCase )
UpperCamelCase_ = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class snake_case ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self) ->Tuple:
return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M")
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = MaMaaaModel.from_pretrained("facebook/m2m100_418M").to(__UpperCAmelCase)
a_ = _long_tensor([[12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38, 2]])
a_ = _long_tensor([[2, 12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38]])
a_ = prepare_mam_aaa_inputs_dict(model.config , __UpperCAmelCase , __UpperCAmelCase)
with torch.no_grad():
a_ = model(**__UpperCAmelCase)[0]
a_ = torch.Size((1, 11, 10_24))
self.assertEqual(output.shape , __UpperCAmelCase)
# change to expected output here
a_ = torch.tensor(
[[-0.7_780, -0.1_676, 0.1_038], [-6.7_556, -1.3_992, 0.0_567], [-7.5_383, -0.5_920, -0.2_779]] , device=__UpperCAmelCase)
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase))
def UpperCAmelCase__ ( self) ->Dict:
a_ = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(__UpperCAmelCase)
# change to intended input
a_ = _long_tensor([[12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38, 2]])
a_ = _long_tensor([[2, 12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38]])
a_ = prepare_mam_aaa_inputs_dict(model.config , __UpperCAmelCase , __UpperCAmelCase)
with torch.no_grad():
a_ = model(**__UpperCAmelCase)[0]
a_ = torch.Size((1, 11, model.config.vocab_size))
self.assertEqual(output.shape , __UpperCAmelCase)
# change to expected output here
a_ = torch.tensor(
[[-1.0_448, -1.0_411, 3.7_992], [-3.2_191, -3.2_386, -1.3_451], [-3.6_210, -3.5_993, 0.4_925]] , device=__UpperCAmelCase)
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase))
def UpperCAmelCase__ ( self) ->Union[str, Any]:
a_ = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(__UpperCAmelCase)
a_ = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" , src_lang="fr" , tgt_lang="en")
a_ = [
"L'affaire NSA souligne l'absence totale de débat sur le renseignement",
"Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
"Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
" Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
" l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
a_ = tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors="pt")
a_ = model.generate(
input_ids=dct["input_ids"].to(__UpperCAmelCase) , attention_mask=dct["attention_mask"].to(__UpperCAmelCase) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("en") , )
a_ = [
"The NSA case highlights the total absence of intelligence debate",
"I think there are two levels of response from the French government.",
"When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
" Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
" communications in France.",
]
a_ = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase)
assert generated == expected_en
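# Hedged usage sketch mirroring the integration test above: translating a short
# French sentence to English with the public M2M100 checkpoint (the example
# sentence is illustrative).
def _demo_m2m100_translation():
    from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
    demo_tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")
    demo_model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
    batch = demo_tokenizer(["La vie est belle."], return_tensors="pt")
    generated = demo_model.generate(**batch, forced_bos_token_id=demo_tokenizer.get_lang_id("en"))
    return demo_tokenizer.batch_decode(generated, skip_special_tokens=True)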
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class snake_case :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=50 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=None , ) ->Dict:
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_mask
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = initializer_range
a_ = use_labels
a_ = scope
def UpperCAmelCase__ ( self) ->Any:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = None
if self.use_input_mask:
a_ = random_attention_mask([self.batch_size, self.seq_length])
if self.use_labels:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = self.get_config()
return config, input_ids, input_mask, token_labels
def UpperCAmelCase__ ( self) ->Optional[Any]:
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self) ->List[str]:
        a_ , a_ , a_ , a_ = self.prepare_config_and_inputs()
a_ = True
a_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
a_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->str:
a_ = BertGenerationEncoder(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase)
a_ = model(__UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->Union[str, Any]:
a_ = True
a_ = BertGenerationEncoder(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , )
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->List[str]:
a_ = True
a_ = True
a_ = BertGenerationDecoder(config=__UpperCAmelCase).to(__UpperCAmelCase).eval()
# first forward pass
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase , )
a_ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
a_ = ids_tensor((self.batch_size, 3) , config.vocab_size)
a_ = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append to next input_ids and
a_ = torch.cat([input_ids, next_tokens] , dim=-1)
a_ = torch.cat([input_mask, next_mask] , dim=-1)
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["hidden_states"][0]
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["hidden_states"][0]
# select random slice
a_ = ids_tensor((1,) , output_from_past.shape[-1]).item()
a_ = output_from_no_past[:, -3:, random_slice_idx].detach()
a_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , ) ->Tuple:
a_ = BertGenerationDecoder(__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase__ ( self) ->str:
a_ , a_ , a_ , a_ = self.prepare_config_and_inputs()
a_ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : List[str] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
a_ : Optional[int] = (BertGenerationDecoder,) if is_torch_available() else ()
a_ : List[Any] = (
{"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder}
if is_torch_available()
else {}
)
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = BertGenerationEncoderTester(self)
a_ = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37)
def UpperCAmelCase__ ( self) ->Optional[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self) ->Tuple:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Tuple:
a_ , a_ , a_ , a_ = self.model_tester.prepare_config_and_inputs()
a_ = "bert"
self.model_tester.create_and_check_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->int:
a_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[str]:
a_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Optional[int]:
# This regression test was failing with PyTorch < 1.3
        a_ , a_ , a_ , a_ , a_ , a_ = self.model_tester.prepare_config_and_inputs_for_decoder()
a_ = None
self.model_tester.create_and_check_model_as_decoder(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , )
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*__UpperCAmelCase)
@slow
def UpperCAmelCase__ ( self) ->str:
a_ = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
self.assertIsNotNone(__UpperCAmelCase)
@require_torch
class snake_case ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self) ->int:
a_ = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
a_ = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]])
with torch.no_grad():
a_ = model(__UpperCAmelCase)[0]
a_ = torch.Size([1, 8, 10_24])
self.assertEqual(output.shape , __UpperCAmelCase)
a_ = torch.tensor(
[[[0.1_775, 0.0_083, -0.0_321], [1.6_002, 0.1_287, 0.3_912], [2.1_473, 0.5_791, 0.6_066]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4))
@require_torch
class snake_case ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self) ->List[str]:
a_ = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
a_ = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]])
with torch.no_grad():
a_ = model(__UpperCAmelCase)[0]
a_ = torch.Size([1, 8, 5_03_58])
self.assertEqual(output.shape , __UpperCAmelCase)
a_ = torch.tensor(
[[[-0.5_788, -2.5_994, -3.7_054], [0.0_438, 4.7_997, 1.8_795], [1.5_862, 6.6_409, 4.4_638]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4))
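# Hedged sketch, following the transformers BertGeneration documentation: pairing the
# encoder and decoder checkpoints above inside an EncoderDecoderModel for seq2seq use.
def _demo_bert_generation_seq2seq():
    from transformers import EncoderDecoderModel
    demo_encoder = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
    demo_decoder = BertGenerationDecoder.from_pretrained(
        "google/bert_for_seq_generation_L-24_bbc_encoder", add_cross_attention=True, is_decoder=True
    )
    return EncoderDecoderModel(encoder=demo_encoder, decoder=demo_decoder)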
"""simple docstring"""
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class snake_case :
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->str:
raise NotImplementedError()
def UpperCAmelCase__ ( self) ->Optional[int]:
raise NotImplementedError()
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase = False , **__UpperCAmelCase) ->int:
a_ = tokenizer
a_ = skip_prompt
a_ = decode_kwargs
# variables used in the streaming process
a_ = []
a_ = 0
a_ = True
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Union[str, Any]:
if len(value.shape) > 1 and value.shape[0] > 1:
raise ValueError("TextStreamer only supports batch size 1")
elif len(value.shape) > 1:
a_ = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
a_ = False
return
# Add the new token to the cache and decodes the entire thing.
self.token_cache.extend(value.tolist())
a_ = self.tokenizer.decode(self.token_cache , **self.decode_kwargs)
# After the symbol for a new line, we flush the cache.
if text.endswith("\n"):
a_ = text[self.print_len :]
a_ = []
a_ = 0
# If the last token is a CJK character, we print the characters.
elif len(__UpperCAmelCase) > 0 and self._is_chinese_char(ord(text[-1])):
a_ = text[self.print_len :]
self.print_len += len(__UpperCAmelCase)
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
a_ = text[self.print_len : text.rfind(" ") + 1]
self.print_len += len(__UpperCAmelCase)
self.on_finalized_text(__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Tuple:
# Flush the cache, if it exists
if len(self.token_cache) > 0:
a_ = self.tokenizer.decode(self.token_cache , **self.decode_kwargs)
a_ = text[self.print_len :]
a_ = []
a_ = 0
else:
a_ = ""
a_ = True
self.on_finalized_text(__UpperCAmelCase , stream_end=__UpperCAmelCase)
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = False) ->Dict:
print(__UpperCAmelCase , flush=__UpperCAmelCase , end="" if not stream_end else None)
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Optional[int]:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
        # like all of the other languages.
if (
(cp >= 0X4_E00 and cp <= 0X9_FFF)
or (cp >= 0X3_400 and cp <= 0X4_DBF) #
or (cp >= 0X20_000 and cp <= 0X2A_6DF) #
or (cp >= 0X2A_700 and cp <= 0X2B_73F) #
or (cp >= 0X2B_740 and cp <= 0X2B_81F) #
or (cp >= 0X2B_820 and cp <= 0X2C_EAF) #
or (cp >= 0XF_900 and cp <= 0XF_AFF)
or (cp >= 0X2F_800 and cp <= 0X2F_A1F) #
): #
return True
return False
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = None , **__UpperCAmelCase) ->Optional[int]:
super().__init__(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase)
a_ = Queue()
a_ = None
a_ = timeout
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = False) ->str:
self.text_queue.put(__UpperCAmelCase , timeout=self.timeout)
if stream_end:
self.text_queue.put(self.stop_signal , timeout=self.timeout)
def __iter__( self) ->Dict:
return self
def UpperCAmelCase__ ( self) ->Optional[Any]:
a_ = self.text_queue.get(timeout=self.timeout)
if value == self.stop_signal:
raise StopIteration()
else:
return value
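# Hedged usage sketch (model name and prompt are illustrative), matching the documented
# transformers streaming API: print tokens to stdout as they are generated.
def _demo_text_streaming():
    from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer
    demo_tokenizer = AutoTokenizer.from_pretrained("gpt2")
    demo_model = AutoModelForCausalLM.from_pretrained("gpt2")
    demo_inputs = demo_tokenizer(["An increasing sequence: one,"], return_tensors="pt")
    demo_streamer = TextStreamer(demo_tokenizer, skip_prompt=True)
    demo_model.generate(**demo_inputs, streamer=demo_streamer, max_new_tokens=20)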
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
def UpperCamelCase ( UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
if "resnet-50" in model_name:
a_ = ResNetConfig.from_pretrained("microsoft/resnet-50" )
elif "resnet-101" in model_name:
a_ = ResNetConfig.from_pretrained("microsoft/resnet-101" )
else:
raise ValueError("Model name should include either resnet50 or resnet101" )
a_ = DetrConfig(use_timm_backbone=UpperCAmelCase , backbone_config=UpperCAmelCase )
# set label attributes
a_ = "panoptic" in model_name
if is_panoptic:
a_ = 250
else:
a_ = 91
a_ = "huggingface/label-files"
a_ = "coco-detection-id2label.json"
a_ = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type="dataset" ) , "r" ) )
a_ = {int(UpperCAmelCase ): v for k, v in idalabel.items()}
a_ = idalabel
a_ = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def UpperCamelCase ( UpperCAmelCase ) ->List[str]:
"""simple docstring"""
a_ = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") )
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") )
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") )
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") )
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
F'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
F'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
] )
return rename_keys
def rename_key(state_dict, old, new):
    """simple docstring"""
    val = state_dict.pop(old)
    state_dict[new] = val
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=False ) ->Optional[Any]:
"""simple docstring"""
a_ = ""
if is_panoptic:
a_ = "detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
a_ = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
a_ = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
a_ = in_proj_weight[:256, :]
a_ = in_proj_bias[:256]
a_ = in_proj_weight[256:512, :]
a_ = in_proj_bias[256:512]
a_ = in_proj_weight[-256:, :]
a_ = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
a_ = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
a_ = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
a_ = in_proj_weight[:256, :]
a_ = in_proj_bias[:256]
a_ = in_proj_weight[256:512, :]
a_ = in_proj_bias[256:512]
a_ = in_proj_weight[-256:, :]
a_ = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
a_ = state_dict.pop(
F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
a_ = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
a_ = in_proj_weight_cross_attn[:256, :]
a_ = in_proj_bias_cross_attn[:256]
a_ = in_proj_weight_cross_attn[256:512, :]
a_ = in_proj_bias_cross_attn[256:512]
a_ = in_proj_weight_cross_attn[-256:, :]
a_ = in_proj_bias_cross_attn[-256:]
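# Illustration (not part of the original script; dummy tensors): DETR's hidden size is 256, so the
# fused in_proj_weight of shape (768, 256) is split row-wise into query / key / value projections
# of 256 rows each, exactly as the slicing above does.
#   import torch
#   in_proj_weight = torch.randn(768, 256)
#   q_w = in_proj_weight[:256, :]
#   k_w = in_proj_weight[256:512, :]
#   v_w = in_proj_weight[-256:, :]
#   assert q_w.shape == k_w.shape == v_w.shape == (256, 256)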
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=False ) ->List[str]:
"""simple docstring"""
a_ , a_ = get_detr_config(UpperCAmelCase )
# load original model from torch hub
a_ = {
"detr-resnet-50": "detr_resnet50",
"detr-resnet-101": "detr_resnet101",
}
logger.info(F'''Converting model {model_name}...''' )
a_ = torch.hub.load("facebookresearch/detr" , model_name_to_original_name[model_name] , pretrained=UpperCAmelCase ).eval()
a_ = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(UpperCAmelCase ):
if is_panoptic:
a_ = "detr." + src
rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(UpperCAmelCase , is_panoptic=UpperCAmelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
a_ = "detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
a_ = state_dict.pop(UpperCAmelCase )
a_ = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
a_ = state_dict.pop(UpperCAmelCase )
a_ = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
a_ = state_dict.pop(UpperCAmelCase )
a_ = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
a_ = state_dict.pop(UpperCAmelCase )
a_ = val
# finally, create HuggingFace model and load state dict
a_ = DetrForSegmentation(UpperCAmelCase ) if is_panoptic else DetrForObjectDetection(UpperCAmelCase )
model.load_state_dict(UpperCAmelCase )
model.eval()
# verify our conversion on an image
a_ = "coco_panoptic" if is_panoptic else "coco_detection"
a_ = DetrImageProcessor(format=UpperCAmelCase )
a_ = processor(images=prepare_img() , return_tensors="pt" )
a_ = encoding["pixel_values"]
a_ = detr(UpperCAmelCase )
a_ = model(UpperCAmelCase )
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase )
model.save_pretrained(UpperCAmelCase )
processor.save_pretrained(UpperCAmelCase )
if push_to_hub:
# Upload model and image processor to the hub
logger.info("Uploading PyTorch model and image processor to the hub..." )
model.push_to_hub(F'''nielsr/{model_name}''' )
processor.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
UpperCamelCase_ = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
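# Example invocation (paths and the script filename are hypothetical, not taken from the source):
#   python convert_detr_checkpoint.py --model_name detr-resnet-50 \
#       --pytorch_dump_folder_path ./detr-resnet-50 --push_to_hub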
"""simple docstring"""
UpperCamelCase_ = '\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
UpperCamelCase_ = [{'type': 'code', 'content': INSTALL_CONTENT}]
UpperCamelCase_ = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    """simple docstring"""
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352
    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
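# Worked example (derived from the branches above, not from the original file): for
# model_name = "focalnet-tiny-lrf" this returns a config with depths [2, 2, 6, 2], embed_dim 96,
# focal_levels [3, 3, 3, 3], focal_windows [3, 3, 3, 3], conv embedding / post-layernorm /
# layerscale disabled, and the ImageNet-1k id2label mapping.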
def rename_key(name):
    """simple docstring"""
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
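# Example trace through the helper above (key invented for illustration): the checkpoint key
#   "layers.0.blocks.1.modulation.f.weight"
# becomes
#   "focalnet.encoder.stages.0.layers.1.modulation.projection_in.weight"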
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) ->Dict:
"""simple docstring"""
a_ = {
"focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
}
# fmt: on
a_ = model_name_to_url[model_name]
print("Checkpoint URL: " , UpperCAmelCase )
a_ = torch.hub.load_state_dict_from_url(UpperCAmelCase , map_location="cpu" )["model"]
# rename keys
for key in state_dict.copy().keys():
a_ = state_dict.pop(UpperCAmelCase )
a_ = val
a_ = get_focalnet_config(UpperCAmelCase )
a_ = FocalNetForImageClassification(UpperCAmelCase )
model.eval()
# load state dict
model.load_state_dict(UpperCAmelCase )
# verify conversion
a_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
a_ = BitImageProcessor(
do_resize=UpperCAmelCase , size={"shortest_edge": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCAmelCase , crop_size=224 , do_normalize=UpperCAmelCase , image_mean=UpperCAmelCase , image_std=UpperCAmelCase , )
a_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
a_ = processor(images=UpperCAmelCase , return_tensors="pt" )
a_ = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
a_ = image_transforms(UpperCAmelCase ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , UpperCAmelCase , atol=1E-4 )
a_ = model(**UpperCAmelCase )
a_ = outputs.logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
print("First values of logits:" , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
a_ = torch.tensor([0.2166, -0.4368, 0.2191] )
elif model_name == "focalnet-tiny-lrf":
a_ = torch.tensor([1.1669, 0.0125, -0.1695] )
elif model_name == "focalnet-small":
a_ = torch.tensor([0.4917, -0.0430, 0.1341] )
elif model_name == "focalnet-small-lrf":
a_ = torch.tensor([-0.2588, -0.5342, -0.2331] )
elif model_name == "focalnet-base":
a_ = torch.tensor([-0.1655, -0.4090, -0.1730] )
elif model_name == "focalnet-base-lrf":
a_ = torch.tensor([0.5306, -0.0483, -0.3928] )
assert torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCAmelCase )
processor.save_pretrained(UpperCAmelCase )
if push_to_hub:
print(F'''Pushing model and processor of {model_name} to the hub...''' )
model.push_to_hub(F'''{model_name}''' )
processor.push_to_hub(F'''{model_name}''' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
UpperCamelCase_ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
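# Example invocation (output path and script filename are hypothetical, not taken from the source):
#   python convert_focalnet_checkpoint.py --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny --push_to_hub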
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=True , UpperCAmelCase="pt" ) ->str:
"""simple docstring"""
a_ = {"add_prefix_space": True} if isinstance(UpperCAmelCase , UpperCAmelCase ) and not line.startswith(" " ) else {}
a_ = padding_side
return tokenizer(
[line] , max_length=UpperCAmelCase , padding="max_length" if pad_to_max_length else None , truncation=UpperCAmelCase , return_tensors=UpperCAmelCase , add_special_tokens=UpperCAmelCase , **UpperCAmelCase , )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , ) ->str:
"""simple docstring"""
a_ = input_ids.ne(UpperCAmelCase ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
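# Minimal sketch of the padding-trim helper above (dummy values, not from the original module):
# columns that contain only the pad token in every row are dropped.
#   import torch
#   input_ids = torch.tensor([[5, 6, 0, 0], [7, 0, 0, 0]])  # pad_token_id = 0
#   keep = input_ids.ne(0).any(dim=0)                       # tensor([True, True, False, False])
#   input_ids[:, keep]                                      # tensor([[5, 6], [7, 0]])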
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="train" , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="" , ) ->Union[str, Any]:
super().__init__()
a_ = Path(__UpperCAmelCase).joinpath(type_path + ".source")
a_ = Path(__UpperCAmelCase).joinpath(type_path + ".target")
a_ = self.get_char_lens(self.src_file)
a_ = max_source_length
a_ = max_target_length
assert min(self.src_lens) > 0, F'''found empty line in {self.src_file}'''
a_ = tokenizer
a_ = prefix
if n_obs is not None:
a_ = self.src_lens[:n_obs]
a_ = src_lang
a_ = tgt_lang
def __len__( self) ->Union[str, Any]:
return len(self.src_lens)
def __getitem__( self , __UpperCAmelCase) ->Dict[str, torch.Tensor]:
a_ = index + 1 # linecache starts at 1
a_ = self.prefix + linecache.getline(str(self.src_file) , __UpperCAmelCase).rstrip("\n")
a_ = linecache.getline(str(self.tgt_file) , __UpperCAmelCase).rstrip("\n")
assert source_line, F'''empty source line for index {index}'''
assert tgt_line, F'''empty tgt line for index {index}'''
# Need to add eos token manually for T5
if isinstance(self.tokenizer , __UpperCAmelCase):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
a_ = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , __UpperCAmelCase) else self.tokenizer
)
a_ = self.tokenizer.generator if isinstance(self.tokenizer , __UpperCAmelCase) else self.tokenizer
a_ = encode_line(__UpperCAmelCase , __UpperCAmelCase , self.max_source_length , "right")
a_ = encode_line(__UpperCAmelCase , __UpperCAmelCase , self.max_target_length , "right")
a_ = source_inputs["input_ids"].squeeze()
a_ = target_inputs["input_ids"].squeeze()
a_ = source_inputs["attention_mask"].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def UpperCAmelCase__ ( __UpperCAmelCase) ->Tuple:
return [len(__UpperCAmelCase) for x in Path(__UpperCAmelCase).open().readlines()]
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Dict[str, torch.Tensor]:
a_ = torch.stack([x["input_ids"] for x in batch])
a_ = torch.stack([x["attention_mask"] for x in batch])
a_ = torch.stack([x["decoder_input_ids"] for x in batch])
a_ = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , __UpperCAmelCase)
else self.tokenizer.pad_token_id
)
a_ = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , __UpperCAmelCase)
else self.tokenizer.pad_token_id
)
a_ = trim_batch(__UpperCAmelCase , __UpperCAmelCase)
a_ , a_ = trim_batch(__UpperCAmelCase , __UpperCAmelCase , attention_mask=__UpperCAmelCase)
a_ = {
"input_ids": source_ids,
"attention_mask": source_mask,
"decoder_input_ids": y,
}
return batch
UpperCamelCase_ = getLogger(__name__)
def UpperCamelCase ( UpperCAmelCase ) ->Optional[int]:
"""simple docstring"""
return list(itertools.chain.from_iterable(UpperCAmelCase ) )
def UpperCamelCase ( UpperCAmelCase ) ->None:
"""simple docstring"""
a_ = get_git_info()
save_json(UpperCAmelCase , os.path.join(UpperCAmelCase , "git_log.json" ) )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=4 , **UpperCAmelCase ) ->Tuple:
"""simple docstring"""
with open(UpperCAmelCase , "w" ) as f:
json.dump(UpperCAmelCase , UpperCAmelCase , indent=UpperCAmelCase , **UpperCAmelCase )
def UpperCamelCase ( UpperCAmelCase ) ->Optional[int]:
"""simple docstring"""
with open(UpperCAmelCase ) as f:
return json.load(UpperCAmelCase )
def UpperCamelCase ( ) ->List[Any]:
"""simple docstring"""
a_ = git.Repo(search_parent_directories=UpperCAmelCase )
a_ = {
"repo_id": str(UpperCAmelCase ),
"repo_sha": str(repo.head.object.hexsha ),
"repo_branch": str(repo.active_branch ),
"hostname": str(socket.gethostname() ),
}
return repo_infos
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->List:
"""simple docstring"""
return list(map(UpperCAmelCase , UpperCAmelCase ) )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->Tuple:
"""simple docstring"""
with open(UpperCAmelCase , "wb" ) as f:
return pickle.dump(UpperCAmelCase , UpperCAmelCase )
def UpperCamelCase ( UpperCAmelCase ) ->str:
"""simple docstring"""
def remove_articles(UpperCAmelCase ):
return re.sub(r"\b(a|an|the)\b" , " " , UpperCAmelCase )
def white_space_fix(UpperCAmelCase ):
return " ".join(text.split() )
def remove_punc(UpperCAmelCase ):
a_ = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(UpperCAmelCase ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(UpperCAmelCase ) ) ) )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->str:
"""simple docstring"""
a_ = normalize_answer(UpperCAmelCase ).split()
a_ = normalize_answer(UpperCAmelCase ).split()
a_ = Counter(UpperCAmelCase ) & Counter(UpperCAmelCase )
a_ = sum(common.values() )
if num_same == 0:
return 0
a_ = 1.0 * num_same / len(UpperCAmelCase )
a_ = 1.0 * num_same / len(UpperCAmelCase )
a_ = (2 * precision * recall) / (precision + recall)
return fa
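# Worked example for the token-level F1 helper above (strings invented): after normalization,
# "The cat sat on the mat!" and "a cat sat on the mat" both reduce to the tokens
# ["cat", "sat", "on", "mat"], so num_same = 4, precision = recall = 1.0 and F1 = 1.0.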
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
return normalize_answer(UpperCAmelCase ) == normalize_answer(UpperCAmelCase )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->Dict:
"""simple docstring"""
assert len(UpperCAmelCase ) == len(UpperCAmelCase )
a_ = 0
for hypo, pred in zip(UpperCAmelCase , UpperCAmelCase ):
em += exact_match_score(UpperCAmelCase , UpperCAmelCase )
if len(UpperCAmelCase ) > 0:
em /= len(UpperCAmelCase )
return {"em": em}
def UpperCamelCase ( UpperCAmelCase ) ->Any:
"""simple docstring"""
return model_prefix.startswith("rag" )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Tuple:
"""simple docstring"""
a_ = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
a_ = "dropout_rate"
for p in extra_params:
if getattr(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
if not hasattr(UpperCAmelCase , UpperCAmelCase ) and not hasattr(UpperCAmelCase , equivalent_param[p] ):
logger.info("config doesn't have a `{}` attribute".format(UpperCAmelCase ) )
delattr(UpperCAmelCase , UpperCAmelCase )
continue
a_ = p if hasattr(UpperCAmelCase , UpperCAmelCase ) else equivalent_param[p]
setattr(UpperCAmelCase , UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
delattr(UpperCAmelCase , UpperCAmelCase )
return hparams, config
"""simple docstring"""
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    """simple docstring"""
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    """simple docstring"""
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    """simple docstring"""
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    """simple docstring"""
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def UpperCamelCase ( UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
a_ = os.path.dirname(UpperCAmelCase )
a_ = os.path.basename(UpperCAmelCase )
a_ = onnx.load(os.path.join(UpperCAmelCase , UpperCAmelCase ) )
a_ = list(model.graph.initializer )
a_ = set()
a_ = {}
a_ = []
a_ = 0
for i in range(len(UpperCAmelCase ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(UpperCAmelCase ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(UpperCAmelCase )
dup_set.add(UpperCAmelCase )
a_ = inits[j].data_type
a_ = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print("unexpected data type: " , UpperCAmelCase )
total_reduced_size += mem_size
a_ = inits[i].name
a_ = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(UpperCAmelCase )
else:
a_ = [name_j]
ind_to_replace.append((j, i) )
print("total reduced size: " , total_reduced_size / 1_024 / 1_024 / 1_024 , "GB" )
a_ = sorted(UpperCAmelCase )
_remove_dup_initializers_from_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
a_ = "optimized_" + model_file_name
a_ = os.path.join(UpperCAmelCase , UpperCAmelCase )
onnx.save(UpperCAmelCase , UpperCAmelCase )
return new_model
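# Hypothetical usage of the routine above (the path is invented and the call assumes the function
# were given back a descriptive name such as remove_dup_initializers, which this dump obfuscates):
#   optimized_path = remove_dup_initializers("exports/model.onnx")
# It writes "exports/optimized_model.onnx" with byte-identical initializer tensors de-duplicated
# and returns that new path.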
"""simple docstring"""
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel
UpperCamelCase_ = HfApi()
UpperCamelCase_ = {}
# fmt: off
UpperCamelCase_ = torch.tensor([
-0.75_15, -1.68_83, 0.24_20, 0.03_00, 0.63_47, 1.34_33, -1.17_43, -3.74_67,
1.23_42, -2.24_85, 0.46_36, 0.80_76, -0.79_91, 0.39_69, 0.84_98, 0.91_89,
-1.88_87, -3.35_22, 0.76_39, 0.20_40, 0.62_71, -2.71_48, -1.63_16, 3.08_39,
0.31_86, 0.27_21, -0.97_59, -1.24_61, 2.62_57, 1.35_57
])
UpperCamelCase_ = torch.tensor([
-2.36_39, -2.53_44, 0.00_54, -0.66_74, 1.59_90, 1.01_58, 0.31_24, -2.14_36,
1.87_95, -2.54_29, -0.15_66, -0.39_73, 1.24_90, 2.64_47, 1.22_83, -0.52_08,
-2.81_54, -3.51_19, 2.38_38, 1.20_33, 1.72_01, -2.12_56, -1.45_76, 2.79_48,
2.42_04, -0.97_52, -1.25_46, 0.80_27, 3.27_58, 3.13_65
])
UpperCamelCase_ = torch.tensor([
-0.65_31, -0.68_91, -0.31_72, -0.53_75, -0.91_40, -0.53_67, -0.11_75, -0.78_69,
-0.38_08, -0.45_13, -0.20_98, -0.00_83, 0.31_83, 0.51_40, 0.22_47, -0.13_04,
-0.13_02, -0.28_02, -0.20_84, -0.20_25, -0.49_67, -0.48_73, -0.08_61, 0.69_25,
0.02_50, 0.12_90, -0.15_43, 0.63_16, 1.04_60, 1.49_43
])
UpperCamelCase_ = torch.tensor([
0.09_11, 0.11_07, 0.01_82, 0.04_35, -0.08_05, -0.06_08, 0.03_81, 0.21_72,
-0.02_80, 0.13_27, -0.02_99, -0.02_55, -0.00_50, -0.11_70, -0.10_46, 0.03_09,
0.13_67, 0.17_28, -0.05_33, -0.07_48, -0.05_34, 0.16_24, 0.03_84, -0.18_05,
-0.07_07, 0.06_42, 0.02_20, -0.01_34, -0.13_33, -0.15_05
])
UpperCamelCase_ = torch.tensor([
0.13_21, 0.13_37, 0.04_40, 0.06_22, -0.05_91, -0.03_70, 0.05_03, 0.21_33,
-0.01_77, 0.14_15, -0.01_16, -0.01_12, 0.00_44, -0.09_80, -0.07_89, 0.03_95,
0.15_02, 0.17_85, -0.04_88, -0.05_14, -0.04_04, 0.15_39, 0.04_54, -0.15_59,
-0.06_65, 0.06_59, 0.03_83, -0.00_05, -0.12_66, -0.13_86
])
UpperCamelCase_ = torch.tensor([
0.11_54, 0.12_18, 0.03_07, 0.05_26, -0.07_11, -0.05_41, 0.03_66, 0.20_78,
-0.02_67, 0.13_17, -0.02_26, -0.01_93, -0.00_14, -0.10_55, -0.09_02, 0.03_30,
0.13_91, 0.17_09, -0.05_62, -0.06_93, -0.05_60, 0.14_82, 0.03_81, -0.16_83,
-0.06_81, 0.06_61, 0.03_31, -0.00_46, -0.12_68, -0.14_31
])
UpperCamelCase_ = torch.tensor([
0.11_92, 0.12_40, 0.04_14, 0.06_06, -0.05_57, -0.04_12, 0.04_30, 0.20_42,
-0.02_00, 0.13_85, -0.01_15, -0.01_32, 0.00_17, -0.09_65, -0.08_02, 0.03_98,
0.14_33, 0.17_47, -0.04_58, -0.05_33, -0.04_07, 0.15_45, 0.04_19, -0.15_74,
-0.06_45, 0.06_26, 0.03_41, -0.00_10, -0.11_99, -0.13_90
])
UpperCamelCase_ = torch.tensor([
0.10_75, 0.10_74, 0.02_05, 0.04_31, -0.07_74, -0.06_07, 0.02_98, 0.20_42,
-0.03_20, 0.12_67, -0.02_81, -0.02_50, -0.00_64, -0.10_91, -0.09_46, 0.02_90,
0.13_28, 0.16_50, -0.05_80, -0.07_38, -0.05_86, 0.14_40, 0.03_37, -0.17_46,
-0.07_12, 0.06_05, 0.02_50, -0.00_99, -0.13_16, -0.14_73
])
UpperCamelCase_ = torch.tensor([
-1.45_72, -2.04_81, -0.04_14, -0.60_05, 1.41_36, 0.58_48, 0.40_28, -2.73_30,
1.22_12, -2.12_28, 0.21_55, 0.40_39, 0.76_62, 2.05_35, 0.74_77, -0.32_43,
-2.17_58, -2.76_48, 1.69_47, 0.70_26, 1.23_38, -1.60_78, -0.86_82, 2.28_10,
1.85_74, -0.57_18, -0.55_86, -0.01_86, 2.34_15, 2.12_51])
UpperCamelCase_ = torch.tensor([
-1.36_90, -1.97_20, -0.40_90, -0.69_66, 1.46_60, 0.99_38, -0.13_85, -2.73_24,
0.77_36, -1.89_17, 0.29_23, 0.42_93, 0.16_93, 1.41_12, 1.18_87, -0.31_81,
-2.21_60, -2.63_81, 1.31_70, 0.81_63, 0.92_40, -1.65_44, -0.60_99, 2.52_59,
1.64_30, -0.90_90, -0.93_92, -0.01_26, 2.42_68, 2.32_66
])
UpperCamelCase_ = torch.tensor([
-1.35_25, -1.96_28, -0.39_56, -0.68_60, 1.46_64, 1.00_14, -0.12_59, -2.72_12,
0.77_72, -1.88_11, 0.29_96, 0.43_88, 0.17_04, 1.40_29, 1.17_01, -0.30_27,
-2.20_53, -2.62_87, 1.33_50, 0.81_31, 0.92_74, -1.62_92, -0.60_98, 2.51_31,
1.65_05, -0.89_58, -0.92_98, -0.01_51, 2.42_57, 2.33_55
])
UpperCamelCase_ = torch.tensor([
-2.05_85, -2.78_97, -0.28_50, -0.89_40, 1.90_52, 0.57_02, 0.63_45, -3.89_59,
1.59_32, -3.23_19, 0.19_74, 0.02_87, 1.75_66, 2.65_43, 0.83_87, -0.53_51,
-3.27_36, -4.33_75, 2.90_29, 1.63_90, 1.46_40, -2.17_01, -1.90_13, 2.93_41,
3.49_81, -0.62_55, -1.16_44, -0.15_91, 3.70_97, 3.20_66
])
UpperCamelCase_ = torch.tensor([
-2.31_39, -2.55_94, -0.01_97, -0.67_85, 1.70_01, 1.16_06, 0.30_75, -2.17_40,
1.80_71, -2.56_30, -0.09_26, -0.38_11, 1.21_16, 2.62_46, 1.27_31, -0.53_98,
-2.81_53, -3.61_40, 2.38_93, 1.32_62, 1.62_58, -2.18_56, -1.32_67, 2.83_95,
2.37_79, -1.06_23, -1.24_68, 0.89_59, 3.33_67, 3.22_43
])
UpperCamelCase_ = torch.tensor([
-2.06_28, -2.76_67, -0.20_89, -0.82_63, 2.05_39, 0.59_92, 0.64_95, -3.83_36,
1.60_25, -3.28_17, 0.17_21, -0.06_33, 1.75_16, 2.70_39, 0.81_00, -0.59_08,
-3.21_13, -4.43_43, 2.92_57, 1.36_32, 1.55_62, -2.14_89, -1.98_94, 3.05_60,
3.33_96, -0.73_28, -1.04_17, 0.03_83, 3.70_93, 3.23_43
])
UpperCamelCase_ = torch.tensor([
-1.45_74, -2.05_69, -0.04_73, -0.61_17, 1.40_18, 0.57_69, 0.41_29, -2.73_44,
1.22_41, -2.13_97, 0.20_00, 0.39_37, 0.76_16, 2.04_53, 0.73_24, -0.33_91,
-2.17_46, -2.77_44, 1.69_63, 0.69_21, 1.21_87, -1.61_72, -0.88_77, 2.24_39,
1.84_71, -0.58_39, -0.56_05, -0.04_64, 2.32_50, 2.12_19
])
# fmt: on
UpperCamelCase_ = api.list_models(filter='diffusers')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
UpperCamelCase_ = '/home/patrick/google_checkpoints/' + mod.modelId.split('/')[-1]
print(F"""Started running {mod.modelId}!!!""")
if mod.modelId.startswith('CompVis'):
            UpperCamelCase_ = UNet2DModel.from_pretrained(local_checkpoint, subfolder='unet')
else:
            UpperCamelCase_ = UNet2DModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
UpperCamelCase_ = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
UpperCamelCase_ = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
UpperCamelCase_ = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['_'.join('_'.join(mod.modelId.split('/')).split('-'))], atol=1E-3
)
print(F"""{mod.modelId} has passed successfully!!!""")
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , __UpperCAmelCase = "▁" , __UpperCAmelCase = True , __UpperCAmelCase = "<unk>" , __UpperCAmelCase = "</s>" , __UpperCAmelCase = "<pad>" , ) ->str:
a_ = {
"pad": {"id": 0, "token": pad_token},
"eos": {"id": 1, "token": eos_token},
"unk": {"id": 2, "token": unk_token},
}
a_ = [None] * len(self.special_tokens)
for token_dict in self.special_tokens.values():
a_ = token_dict["token"]
a_ = Tokenizer(Unigram())
a_ = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(" {2,}") , " "),
normalizers.Lowercase(),
])
a_ = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase),
pre_tokenizers.Digits(individual_digits=__UpperCAmelCase),
pre_tokenizers.Punctuation(),
])
a_ = decoders.Metaspace(replacement=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase)
a_ = TemplateProcessing(
single=F'''$A {self.special_tokens["eos"]["token"]}''' , special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])] , )
a_ = {
"model": "SentencePieceUnigram",
"replacement": replacement,
"add_prefix_space": add_prefix_space,
}
super().__init__(__UpperCAmelCase , __UpperCAmelCase)
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = 80_00 , __UpperCAmelCase = True , ) ->Optional[Any]:
a_ = trainers.UnigramTrainer(
vocab_size=__UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=__UpperCAmelCase , )
if isinstance(__UpperCAmelCase , __UpperCAmelCase):
a_ = [files]
self._tokenizer.train(__UpperCAmelCase , trainer=__UpperCAmelCase)
self.add_unk_id()
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = 80_00 , __UpperCAmelCase = True , ) ->int:
a_ = trainers.UnigramTrainer(
vocab_size=__UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=__UpperCAmelCase , )
self._tokenizer.train_from_iterator(__UpperCAmelCase , trainer=__UpperCAmelCase)
self.add_unk_id()
def UpperCAmelCase__ ( self) ->Union[str, Any]:
a_ = json.loads(self._tokenizer.to_str())
a_ = self.special_tokens["unk"]["id"]
a_ = Tokenizer.from_str(json.dumps(__UpperCAmelCase))
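# Hypothetical usage sketch (corpus path invented; the class and method names below assume the
# upstream spellings SentencePieceUnigramTokenizer / train, which the obfuscated definitions above
# no longer carry):
#   tokenizer = SentencePieceUnigramTokenizer()
#   tokenizer.train(files=["corpus.txt"], vocab_size=8000)
#   tokenizer.save("unigram-tokenizer.json")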
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
UpperCamelCase_ = {
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
    'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def UpperCamelCase ( UpperCAmelCase ) ->int:
"""simple docstring"""
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
if args.student_type == "roberta":
a_ = False
elif args.student_type == "gpt2":
a_ = False
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->Tuple:
"""simple docstring"""
if args.student_type == "roberta":
a_ = False
def UpperCamelCase ( ) ->Optional[Any]:
"""simple docstring"""
a_ = argparse.ArgumentParser(description="Training" )
parser.add_argument("--force" , action="store_true" , help="Overwrite dump_path if it already exists." )
parser.add_argument(
"--dump_path" , type=UpperCAmelCase , required=UpperCAmelCase , help="The output directory (log, checkpoints, parameters, etc.)" )
parser.add_argument(
"--data_file" , type=UpperCAmelCase , required=UpperCAmelCase , help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence." , )
parser.add_argument(
"--student_type" , type=UpperCAmelCase , choices=["distilbert", "roberta", "gpt2"] , required=UpperCAmelCase , help="The student type (DistilBERT, RoBERTa)." , )
parser.add_argument("--student_config" , type=UpperCAmelCase , required=UpperCAmelCase , help="Path to the student configuration." )
parser.add_argument(
"--student_pretrained_weights" , default=UpperCAmelCase , type=UpperCAmelCase , help="Load student initialization checkpoint." )
parser.add_argument(
"--teacher_type" , choices=["bert", "roberta", "gpt2"] , required=UpperCAmelCase , help="Teacher type (BERT, RoBERTa)." )
parser.add_argument("--teacher_name" , type=UpperCAmelCase , required=UpperCAmelCase , help="The teacher model." )
parser.add_argument("--temperature" , default=2.0 , type=UpperCAmelCase , help="Temperature for the softmax temperature." )
parser.add_argument(
"--alpha_ce" , default=0.5 , type=UpperCAmelCase , help="Linear weight for the distillation loss. Must be >=0." )
parser.add_argument(
"--alpha_mlm" , default=0.0 , type=UpperCAmelCase , help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag." , )
parser.add_argument("--alpha_clm" , default=0.5 , type=UpperCAmelCase , help="Linear weight for the CLM loss. Must be >=0." )
parser.add_argument("--alpha_mse" , default=0.0 , type=UpperCAmelCase , help="Linear weight of the MSE loss. Must be >=0." )
parser.add_argument(
"--alpha_cos" , default=0.0 , type=UpperCAmelCase , help="Linear weight of the cosine embedding loss. Must be >=0." )
parser.add_argument(
"--mlm" , action="store_true" , help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM." )
parser.add_argument(
"--mlm_mask_prop" , default=0.15 , type=UpperCAmelCase , help="Proportion of tokens for which we need to make a prediction." , )
parser.add_argument("--word_mask" , default=0.8 , type=UpperCAmelCase , help="Proportion of tokens to mask out." )
parser.add_argument("--word_keep" , default=0.1 , type=UpperCAmelCase , help="Proportion of tokens to keep." )
parser.add_argument("--word_rand" , default=0.1 , type=UpperCAmelCase , help="Proportion of tokens to randomly replace." )
parser.add_argument(
"--mlm_smoothing" , default=0.7 , type=UpperCAmelCase , help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)." , )
parser.add_argument("--token_counts" , type=UpperCAmelCase , help="The token counts in the data_file for MLM." )
parser.add_argument(
"--restrict_ce_to_mask" , action="store_true" , help="If true, compute the distillation loss only the [MLM] prediction distribution." , )
parser.add_argument(
"--freeze_pos_embs" , action="store_true" , help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only." , )
parser.add_argument(
"--freeze_token_type_embds" , action="store_true" , help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only." , )
parser.add_argument("--n_epoch" , type=UpperCAmelCase , default=3 , help="Number of pass on the whole dataset." )
parser.add_argument("--batch_size" , type=UpperCAmelCase , default=5 , help="Batch size (for each process)." )
parser.add_argument(
"--group_by_size" , action="store_false" , help="If true, group sequences that have similar length into the same batch. Default is true." , )
parser.add_argument(
"--gradient_accumulation_steps" , type=UpperCAmelCase , default=50 , help="Gradient accumulation for larger training batches." , )
parser.add_argument("--warmup_prop" , default=0.05 , type=UpperCAmelCase , help="Linear warmup proportion." )
parser.add_argument("--weight_decay" , default=0.0 , type=UpperCAmelCase , help="Weight decay if we apply some." )
parser.add_argument("--learning_rate" , default=5E-4 , type=UpperCAmelCase , help="The initial learning rate for Adam." )
parser.add_argument("--adam_epsilon" , default=1E-6 , type=UpperCAmelCase , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , default=5.0 , type=UpperCAmelCase , help="Max gradient norm." )
parser.add_argument("--initializer_range" , default=0.02 , type=UpperCAmelCase , help="Random initialization range." )
parser.add_argument(
"--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , )
parser.add_argument(
"--fp16_opt_level" , type=UpperCAmelCase , default="O1" , help=(
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html"
) , )
parser.add_argument("--n_gpu" , type=UpperCAmelCase , default=1 , help="Number of GPUs in the node." )
parser.add_argument("--local_rank" , type=UpperCAmelCase , default=-1 , help="Distributed training - Local rank" )
parser.add_argument("--seed" , type=UpperCAmelCase , default=56 , help="Random seed" )
parser.add_argument("--log_interval" , type=UpperCAmelCase , default=500 , help="Tensorboard logging interval." )
parser.add_argument("--checkpoint_interval" , type=UpperCAmelCase , default=4_000 , help="Checkpoint interval." )
a_ = parser.parse_args()
sanity_checks(UpperCAmelCase )
# ARGS #
init_gpu_params(UpperCAmelCase )
set_seed(UpperCAmelCase )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
                    F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'''
                    " it. Use `--force` if you want to overwrite it." )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
with open(os.path.join(args.dump_path , "parameters.json" ) , "w" ) as f:
json.dump(vars(UpperCAmelCase ) , UpperCAmelCase , indent=4 )
git_log(args.dump_path )
a_ , a_ , a_ = MODEL_CLASSES[args.student_type]
a_ , a_ , a_ = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
a_ = teacher_tokenizer_class.from_pretrained(args.teacher_name )
a_ = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
a_ = tokenizer.all_special_tokens.index(UpperCAmelCase )
a_ = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''' )
a_ = special_tok_ids
a_ = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''' )
with open(args.data_file , "rb" ) as fp:
a_ = pickle.load(UpperCAmelCase )
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts , "rb" ) as fp:
a_ = pickle.load(UpperCAmelCase )
a_ = np.maximum(UpperCAmelCase , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
a_ = 0.0 # do not predict special tokens
a_ = torch.from_numpy(UpperCAmelCase )
else:
a_ = None
a_ = LmSeqsDataset(params=UpperCAmelCase , data=UpperCAmelCase )
logger.info("Data loader created." )
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''' )
a_ = student_config_class.from_pretrained(args.student_config )
a_ = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
a_ = student_model_class.from_pretrained(args.student_pretrained_weights , config=UpperCAmelCase )
else:
a_ = student_model_class(UpperCAmelCase )
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''' )
logger.info("Student loaded." )
# TEACHER #
a_ = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=UpperCAmelCase )
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''' )
logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(UpperCAmelCase , UpperCAmelCase )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(UpperCAmelCase , UpperCAmelCase )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
a_ = Distiller(
params=UpperCAmelCase , dataset=UpperCAmelCase , token_probs=UpperCAmelCase , student=UpperCAmelCase , teacher=UpperCAmelCase )
distiller.train()
logger.info("Let's go get some drinks." )
if __name__ == "__main__":
main()
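# Example invocation (paths and hyper-parameters are hypothetical, not taken from the source):
#   python train.py --student_type distilbert --student_config training_configs/distilbert-base-uncased.json \
#       --teacher_type bert --teacher_name bert-base-uncased --mlm --alpha_mlm 0.5 --alpha_clm 0.0 \
#       --dump_path serialization_dir/my_first_training --data_file data/binarized_text.pickle \
#       --token_counts data/token_counts.pickle --force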
"""simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def test_hf_hub_url(repo_id, path, revision) -> None:
    """simple docstring"""
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == F'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(path)}'''
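# Concrete example of the URL shape asserted above (values invented for illustration):
#   hf_hub_url(repo_id="org-name/dataset-name", path="filename with blanks.csv", revision=None)
#   == "https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv"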
"""simple docstring"""
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]
    def __eq__(self, other):
        return self[-1] == other[-1]
def patience_sort(collection: list) -> list:
    """simple docstring"""
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)
    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
if __name__ == "__main__":
UpperCamelCase_ = input('Enter numbers separated by a comma:\n').strip()
UpperCamelCase_ = [int(item) for item in user_input.split(',')]
print(patience_sort(unsorted))
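# Worked example (illustrative, not part of the original module): patience_sort([4, 1, 3, 2])
# builds the piles [4, 1] and [3, 2] (each pile kept in descending order), and the heap-based
# merge of their reversals yields [1, 2, 3, 4].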
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : Tuple = """audio-spectrogram-transformer"""
def __init__( self , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=16 , __UpperCAmelCase=True , __UpperCAmelCase=10 , __UpperCAmelCase=10 , __UpperCAmelCase=10_24 , __UpperCAmelCase=1_28 , **__UpperCAmelCase , ) ->str:
super().__init__(**__UpperCAmelCase)
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = initializer_range
a_ = layer_norm_eps
a_ = patch_size
a_ = qkv_bias
a_ = frequency_stride
a_ = time_stride
a_ = max_length
a_ = num_mel_bins
"""simple docstring"""
from __future__ import annotations
import numpy as np
def relu(vector) -> np.ndarray:
    """simple docstring"""
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
),
}
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : str = """xlm-roberta"""
def __init__( self , __UpperCAmelCase=3_05_22 , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase="absolute" , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , ) ->Union[str, Any]:
super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase)
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = hidden_act
a_ = intermediate_size
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = initializer_range
a_ = layer_norm_eps
a_ = position_embedding_type
a_ = use_cache
a_ = classifier_dropout
class snake_case ( SCREAMING_SNAKE_CASE_ ):
@property
def UpperCAmelCase__ ( self) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
a_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
a_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
])
"""simple docstring"""
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    """simple docstring"""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
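# Example values (illustrative, not from the original file): with the defaults mu = 0.0 and
# sigma = 1.0, gaussian(0) == 1 / sqrt(2 * pi) ≈ 0.3989422804014327.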
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=24 , __UpperCAmelCase=2 , __UpperCAmelCase=6 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=None , __UpperCAmelCase=10_00 , ) ->List[str]:
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_mask
a_ = use_token_type_ids
a_ = use_labels
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = type_sequence_label_size
a_ = initializer_range
a_ = num_labels
a_ = scope
a_ = range_bbox
def UpperCAmelCase__ ( self) ->int:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox)
# Ensure that bbox is legal
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
a_ = bbox[i, j, 3]
a_ = bbox[i, j, 1]
a_ = t
if bbox[i, j, 2] < bbox[i, j, 0]:
a_ = bbox[i, j, 2]
a_ = bbox[i, j, 0]
a_ = t
a_ = None
if self.use_input_mask:
a_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
a_ = None
if self.use_token_type_ids:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a_ = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCAmelCase__ ( self) ->List[str]:
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Any:
a_ = LiltModel(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase)
a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase , token_type_ids=__UpperCAmelCase)
a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Union[str, Any]:
a_ = self.num_labels
a_ = LiltForTokenClassification(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(
__UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Dict:
a_ = LiltForQuestionAnswering(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(
__UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase__ ( self) ->str:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
return config, inputs_dict
@require_torch
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : List[Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
a_ : List[str] = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ : Any = False
a_ : Dict = False
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->int:
return True
def UpperCAmelCase__ ( self) ->str:
a_ = LiltModelTester(self)
a_ = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37)
def UpperCAmelCase__ ( self) ->List[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self) ->Tuple:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Dict:
a_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a_ = type
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[str]:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->str:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase)
@slow
def UpperCAmelCase__ ( self) ->List[Any]:
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = LiltModel.from_pretrained(__UpperCAmelCase)
self.assertIsNotNone(__UpperCAmelCase)
@require_torch
@slow
class snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(__UpperCAmelCase)
a_ = torch.tensor([[1, 2]] , device=__UpperCAmelCase)
a_ = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__UpperCAmelCase)
# forward pass
with torch.no_grad():
a_ = model(input_ids=__UpperCAmelCase , bbox=__UpperCAmelCase)
a_ = torch.Size([1, 2, 7_68])
a_ = torch.tensor(
[[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]] , device=__UpperCAmelCase , )
self.assertEqual(outputs.last_hidden_state.shape , __UpperCAmelCase)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __UpperCAmelCase , atol=1E-3))
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase_ = {'configuration_glpn': ['GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GLPNConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ['GLPNFeatureExtractor']
UpperCamelCase_ = ['GLPNImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'GLPN_PRETRAINED_MODEL_ARCHIVE_LIST',
'GLPNForDepthEstimation',
'GLPNLayer',
'GLPNModel',
'GLPNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
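# Hedged sketch (not the actual Transformers `_LazyModule` implementation): the lazy import
# structure above defers importing backend-specific submodules until one of their exported
# names is first accessed. Absolute module names ("math", "json") are used here only to keep
# the demo self-contained and runnable.
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name to the module that defines it
        self._name_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__} has no attribute {attr}")
        value = getattr(importlib.import_module(self._name_to_module[attr]), attr)
        setattr(self, attr, value)  # cache so the underlying import happens only once
        return value


# "math" is imported only when `sqrt` is first touched
lazy_demo = LazyModuleSketch("demo", {"math": ["sqrt"], "json": ["dumps"]})
print(lazy_demo.sqrt(9.0))  # 3.0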
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def UpperCAmelCase__ ( self) ->Any:
a_ = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(__UpperCAmelCase , "embed_dim"))
self.parent.assertTrue(hasattr(__UpperCAmelCase , "num_heads"))
class snake_case :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=64 , __UpperCAmelCase=3 , __UpperCAmelCase=[16, 48, 96] , __UpperCAmelCase=[1, 3, 6] , __UpperCAmelCase=[1, 2, 10] , __UpperCAmelCase=[7, 3, 3] , __UpperCAmelCase=[4, 2, 2] , __UpperCAmelCase=[2, 1, 1] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[False, False, True] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=2 , ) ->Optional[int]:
a_ = parent
a_ = batch_size
a_ = image_size
a_ = patch_sizes
a_ = patch_stride
a_ = patch_padding
a_ = is_training
a_ = use_labels
a_ = num_labels
a_ = num_channels
a_ = embed_dim
a_ = num_heads
a_ = stride_kv
a_ = depth
a_ = cls_token
a_ = attention_drop_rate
a_ = initializer_range
a_ = layer_norm_eps
def UpperCAmelCase__ ( self) ->Any:
a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a_ = None
if self.use_labels:
# create a random int32 tensor of given shape
a_ = ids_tensor([self.batch_size] , self.num_labels)
a_ = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self) ->Union[str, Any]:
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Optional[Any]:
a_ = TFCvtModel(config=__UpperCAmelCase)
a_ = model(__UpperCAmelCase , training=__UpperCAmelCase)
a_ = (self.image_size, self.image_size)
a_ , a_ = image_size[0], image_size[1]
for i in range(len(self.depth)):
a_ = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
a_ = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->str:
a_ = self.num_labels
a_ = TFCvtForImageClassification(__UpperCAmelCase)
a_ = model(__UpperCAmelCase , labels=__UpperCAmelCase , training=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase__ ( self) ->Tuple:
a_ = self.prepare_config_and_inputs()
a_ , a_ , a_ = config_and_inputs
a_ = {"pixel_values": pixel_values}
return config, inputs_dict
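# Hedged worked example (not part of the tester): the output-resolution formula used in
# create_and_check_model above, floor((size + 2 * padding - kernel) / stride) + 1, applied
# per stage with the tester defaults assumed here (image_size=64, patch_sizes=[7, 3, 3],
# patch_stride=[4, 2, 2], patch_padding=[2, 1, 1]).
from math import floor

size = 64
for kernel, stride, padding in zip([7, 3, 3], [4, 2, 2], [2, 1, 1]):
    size = floor((size + 2 * padding - kernel) / stride) + 1
print(size)  # 4: the 64x64 input shrinks to 16, then 8, then 4 across the three stages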
@require_tf
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : Union[str, Any] = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
a_ : List[Any] = (
{"""feature-extraction""": TFCvtModel, """image-classification""": TFCvtForImageClassification}
if is_tf_available()
else {}
)
a_ : Any = False
a_ : Dict = False
a_ : Optional[int] = False
a_ : List[Any] = False
a_ : List[Any] = False
def UpperCAmelCase__ ( self) ->List[str]:
a_ = TFCvtModelTester(self)
a_ = TFCvtConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37)
def UpperCAmelCase__ ( self) ->List[str]:
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="Cvt does not output attentions")
def UpperCAmelCase__ ( self) ->Dict:
pass
@unittest.skip(reason="Cvt does not use inputs_embeds")
def UpperCAmelCase__ ( self) ->List[str]:
pass
@unittest.skip(reason="Cvt does not support input and output embeddings")
def UpperCAmelCase__ ( self) ->Optional[Any]:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
def UpperCAmelCase__ ( self) ->Dict:
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def UpperCAmelCase__ ( self) ->List[str]:
super().test_keras_fit()
@unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8")
def UpperCAmelCase__ ( self) ->Dict:
a_ = tf.keras.mixed_precision.Policy("mixed_float16")
tf.keras.mixed_precision.set_global_policy(__UpperCAmelCase)
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("float32")
def UpperCAmelCase__ ( self) ->Optional[int]:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(__UpperCAmelCase)
a_ = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ = [*signature.parameters.keys()]
a_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Optional[int]:
def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase):
a_ = model_class(__UpperCAmelCase)
a_ = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase))
a_ = outputs.hidden_states
a_ = len(self.model_tester.depth)
self.assertEqual(len(__UpperCAmelCase) , __UpperCAmelCase)
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:]) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a_ = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Dict:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[str]:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase)
@slow
def UpperCAmelCase__ ( self) ->str:
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = TFCvtModel.from_pretrained(__UpperCAmelCase)
self.assertIsNotNone(__UpperCAmelCase)
def UpperCamelCase ( ) ->Dict:
"""simple docstring"""
a_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class snake_case ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self) ->int:
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
@slow
def UpperCAmelCase__ ( self) ->Any:
a_ = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
a_ = self.default_image_processor
a_ = prepare_img()
a_ = image_processor(images=__UpperCAmelCase , return_tensors="tf")
# forward pass
a_ = model(**__UpperCAmelCase)
# verify the logits
a_ = tf.TensorShape((1, 10_00))
self.assertEqual(outputs.logits.shape , __UpperCAmelCase)
a_ = tf.constant([0.9_285, 0.9_015, -0.3_150])
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __UpperCAmelCase , atol=1E-4))
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {
'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'],
'tokenization_electra': ['ElectraTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ['ElectraTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'ElectraForCausalLM',
'ElectraForMaskedLM',
'ElectraForMultipleChoice',
'ElectraForPreTraining',
'ElectraForQuestionAnswering',
'ElectraForSequenceClassification',
'ElectraForTokenClassification',
'ElectraModel',
'ElectraPreTrainedModel',
'load_tf_weights_in_electra',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFElectraForMaskedLM',
'TFElectraForMultipleChoice',
'TFElectraForPreTraining',
'TFElectraForQuestionAnswering',
'TFElectraForSequenceClassification',
'TFElectraForTokenClassification',
'TFElectraModel',
'TFElectraPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'FlaxElectraForCausalLM',
'FlaxElectraForMaskedLM',
'FlaxElectraForMultipleChoice',
'FlaxElectraForPreTraining',
'FlaxElectraForQuestionAnswering',
'FlaxElectraForSequenceClassification',
'FlaxElectraForTokenClassification',
'FlaxElectraModel',
'FlaxElectraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : Dict = """Speech2TextFeatureExtractor"""
a_ : str = """Speech2TextTokenizer"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase) ->List[str]:
super().__init__(__UpperCAmelCase , __UpperCAmelCase)
a_ = self.feature_extractor
a_ = False
def __call__( self , *__UpperCAmelCase , **__UpperCAmelCase) ->Optional[int]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__UpperCAmelCase , **__UpperCAmelCase)
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
a_ = kwargs.pop("raw_speech")
else:
a_ = kwargs.pop("audio" , __UpperCAmelCase)
a_ = kwargs.pop("sampling_rate" , __UpperCAmelCase)
a_ = kwargs.pop("text" , __UpperCAmelCase)
if len(__UpperCAmelCase) > 0:
a_ = args[0]
a_ = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process.")
if audio is not None:
a_ = self.feature_extractor(__UpperCAmelCase , *__UpperCAmelCase , sampling_rate=__UpperCAmelCase , **__UpperCAmelCase)
if text is not None:
a_ = self.tokenizer(__UpperCAmelCase , **__UpperCAmelCase)
if text is None:
return inputs
elif audio is None:
return encodings
else:
a_ = encodings["input_ids"]
return inputs
def UpperCAmelCase__ ( self , *__UpperCAmelCase , **__UpperCAmelCase) ->str:
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase)
def UpperCAmelCase__ ( self , *__UpperCAmelCase , **__UpperCAmelCase) ->int:
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase)
@contextmanager
def UpperCAmelCase__ ( self) ->Tuple:
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call.")
a_ = True
a_ = self.tokenizer
yield
a_ = self.feature_extractor
a_ = False
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self) ->Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
a_ = FlaxDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=__UpperCAmelCase , cache_dir=__UpperCAmelCase)
a_ = [t[-1] for t in os.walk(os.path.join(__UpperCAmelCase , os.listdir(__UpperCAmelCase)[0] , "snapshots"))]
a_ = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith(".bin") for f in files)
@slow
@require_flax
class snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self) ->Tuple:
a_ , a_ = FlaxStableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=__UpperCAmelCase)
a_ = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
a_ = jax.random.PRNGKey(0)
a_ = 4
a_ = jax.device_count()
a_ = num_samples * [prompt]
a_ = pipeline.prepare_inputs(__UpperCAmelCase)
# shard inputs and rng
a_ = replicate(__UpperCAmelCase)
a_ = jax.random.split(__UpperCAmelCase , __UpperCAmelCase)
a_ = shard(__UpperCAmelCase)
a_ = pipeline(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , jit=__UpperCAmelCase).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 4.1_514_745) < 1E-3
assert np.abs(np.abs(__UpperCAmelCase , dtype=np.floataa).sum() - 49_947.875) < 5E-1
a_ = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
assert len(__UpperCAmelCase) == num_samples
def UpperCAmelCase__ ( self) ->Union[str, Any]:
a_ , a_ = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="flax" , safety_checker=__UpperCAmelCase)
a_ = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
a_ = jax.random.PRNGKey(0)
a_ = 50
a_ = jax.device_count()
a_ = num_samples * [prompt]
a_ = pipeline.prepare_inputs(__UpperCAmelCase)
# shard inputs and rng
a_ = replicate(__UpperCAmelCase)
a_ = jax.random.split(__UpperCAmelCase , __UpperCAmelCase)
a_ = shard(__UpperCAmelCase)
a_ = pipeline(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , jit=__UpperCAmelCase).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.05_652_401)) < 1E-3
assert np.abs((np.abs(__UpperCAmelCase , dtype=np.floataa).sum() - 2_383_808.2)) < 5E-1
def UpperCAmelCase__ ( self) ->str:
a_ , a_ = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=__UpperCAmelCase)
a_ = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
a_ = jax.random.PRNGKey(0)
a_ = 50
a_ = jax.device_count()
a_ = num_samples * [prompt]
a_ = pipeline.prepare_inputs(__UpperCAmelCase)
# shard inputs and rng
a_ = replicate(__UpperCAmelCase)
a_ = jax.random.split(__UpperCAmelCase , __UpperCAmelCase)
a_ = shard(__UpperCAmelCase)
a_ = pipeline(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , jit=__UpperCAmelCase).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.04_003_906)) < 1E-3
assert np.abs((np.abs(__UpperCAmelCase , dtype=np.floataa).sum() - 2_373_516.75)) < 5E-1
def UpperCAmelCase__ ( self) ->Union[str, Any]:
a_ , a_ = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa)
a_ = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
a_ = jax.random.PRNGKey(0)
a_ = 50
a_ = jax.device_count()
a_ = num_samples * [prompt]
a_ = pipeline.prepare_inputs(__UpperCAmelCase)
# shard inputs and rng
a_ = replicate(__UpperCAmelCase)
a_ = jax.random.split(__UpperCAmelCase , __UpperCAmelCase)
a_ = shard(__UpperCAmelCase)
a_ = pipeline(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , jit=__UpperCAmelCase).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.04_003_906)) < 1E-3
assert np.abs((np.abs(__UpperCAmelCase , dtype=np.floataa).sum() - 2_373_516.75)) < 5E-1
def UpperCAmelCase__ ( self) ->Tuple:
a_ = FlaxDDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , set_alpha_to_one=__UpperCAmelCase , steps_offset=1 , )
a_ , a_ = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase , )
a_ = scheduler.create_state()
a_ = scheduler_state
a_ = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
a_ = jax.random.PRNGKey(0)
a_ = 50
a_ = jax.device_count()
a_ = num_samples * [prompt]
a_ = pipeline.prepare_inputs(__UpperCAmelCase)
# shard inputs and rng
a_ = replicate(__UpperCAmelCase)
a_ = jax.random.split(__UpperCAmelCase , __UpperCAmelCase)
a_ = shard(__UpperCAmelCase)
a_ = pipeline(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , jit=__UpperCAmelCase).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.045_043_945)) < 1E-3
assert np.abs((np.abs(__UpperCAmelCase , dtype=np.floataa).sum() - 2_347_693.5)) < 5E-1
def UpperCAmelCase__ ( self) ->Optional[int]:
a_ = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
a_ = jax.device_count()
a_ = num_samples * [prompt]
a_ = jax.random.split(jax.random.PRNGKey(0) , __UpperCAmelCase)
a_ , a_ = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=__UpperCAmelCase , )
a_ = replicate(__UpperCAmelCase)
a_ = pipeline.prepare_inputs(__UpperCAmelCase)
a_ = shard(__UpperCAmelCase)
a_ = pipeline(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , jit=__UpperCAmelCase).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
a_ = images[2, 0, 2_56, 10:17, 1]
# With memory efficient attention
a_ , a_ = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=__UpperCAmelCase , use_memory_efficient_attention=__UpperCAmelCase , )
a_ = replicate(__UpperCAmelCase)
a_ = pipeline.prepare_inputs(__UpperCAmelCase)
a_ = shard(__UpperCAmelCase)
a_ = pipeline(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , jit=__UpperCAmelCase).images
assert images_eff.shape == (num_samples, 1, 5_12, 5_12, 3)
a_ = images_eff[2, 0, 2_56, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice).max() < 1E-2
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {
'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
'LiltForQuestionAnswering',
'LiltForSequenceClassification',
'LiltForTokenClassification',
'LiltModel',
'LiltPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class snake_case ( nn.Module ):
def __init__( self , __UpperCAmelCase = 16 , __UpperCAmelCase = 88 , __UpperCAmelCase = None , __UpperCAmelCase = 1 , __UpperCAmelCase = 0.0 , __UpperCAmelCase = 32 , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = "geglu" , __UpperCAmelCase = None , ) ->int:
super().__init__()
a_ = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=__UpperCAmelCase , attention_head_dim=__UpperCAmelCase , in_channels=__UpperCAmelCase , num_layers=__UpperCAmelCase , dropout=__UpperCAmelCase , norm_num_groups=__UpperCAmelCase , cross_attention_dim=__UpperCAmelCase , attention_bias=__UpperCAmelCase , sample_size=__UpperCAmelCase , num_vector_embeds=__UpperCAmelCase , activation_fn=__UpperCAmelCase , num_embeds_ada_norm=__UpperCAmelCase , )
for _ in range(2)
])
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
a_ = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
a_ = [77, 2_57]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
a_ = [1, 0]
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase = True , ) ->str:
a_ = hidden_states
a_ = []
a_ = 0
# attention_mask is not used yet
for i in range(2):
# for each of the two transformers, pass the corresponding condition tokens
a_ = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
a_ = self.transformer_index_for_condition[i]
a_ = self.transformers[transformer_index](
__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , timestep=__UpperCAmelCase , cross_attention_kwargs=__UpperCAmelCase , return_dict=__UpperCAmelCase , )[0]
encoded_states.append(encoded_state - input_states)
tokens_start += self.condition_lengths[i]
a_ = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
a_ = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=__UpperCAmelCase)
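# Hedged illustration (toy code, not the module above): how the forward pass splits the
# concatenated condition tokens and blends the two transformer outputs. The identity
# lambdas stand in for the two Transformer2DModel instances; the 77/257 split and the
# 0.5 mix ratio mirror the defaults set in __init__.
import torch


def dual_mix_sketch(hidden_states, encoder_hidden_states, condition_lengths=(77, 257), mix_ratio=0.5):
    transformers = [lambda h, cond: h, lambda h, cond: h]  # placeholders for the real blocks
    transformer_index_for_condition = [1, 0]
    input_states = hidden_states
    encoded_states = []
    tokens_start = 0
    for i in range(2):
        condition = encoder_hidden_states[:, tokens_start : tokens_start + condition_lengths[i]]
        idx = transformer_index_for_condition[i]
        encoded_states.append(transformers[idx](input_states, condition) - input_states)  # keep the residual
        tokens_start += condition_lengths[i]
    mixed = encoded_states[0] * mix_ratio + encoded_states[1] * (1 - mix_ratio)
    return mixed + input_states


# toy shapes: batch=1, 4 latent tokens of width 8, and 77 + 257 condition tokens of width 8
out = dual_mix_sketch(torch.randn(1, 4, 8), torch.randn(1, 77 + 257, 8))
print(out.shape)  # torch.Size([1, 4, 8])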
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=False ) ->Union[str, Any]:
"""simple docstring"""
a_ = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith("head" ):
a_ = "segformer.encoder." + key
if key.startswith("backbone" ):
a_ = key.replace("backbone" , "segformer.encoder" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
a_ = key[key.find("patch_embed" ) + len("patch_embed" )]
a_ = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(idx )-1}''' )
if "norm" in key:
a_ = key.replace("norm" , "layer_norm" )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
a_ = key[key.find("segformer.encoder.layer_norm" ) + len("segformer.encoder.layer_norm" )]
a_ = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(idx )-1}''' )
if "layer_norm1" in key:
a_ = key.replace("layer_norm1" , "layer_norm_1" )
if "layer_norm2" in key:
a_ = key.replace("layer_norm2" , "layer_norm_2" )
if "block" in key:
# replace for example block1 by block.0
a_ = key[key.find("block" ) + len("block" )]
a_ = key.replace(F'''block{idx}''' , F'''block.{int(idx )-1}''' )
if "attn.q" in key:
a_ = key.replace("attn.q" , "attention.self.query" )
if "attn.proj" in key:
a_ = key.replace("attn.proj" , "attention.output.dense" )
if "attn" in key:
a_ = key.replace("attn" , "attention.self" )
if "fc1" in key:
a_ = key.replace("fc1" , "dense1" )
if "fc2" in key:
a_ = key.replace("fc2" , "dense2" )
if "linear_pred" in key:
a_ = key.replace("linear_pred" , "classifier" )
if "linear_fuse" in key:
a_ = key.replace("linear_fuse.conv" , "linear_fuse" )
a_ = key.replace("linear_fuse.bn" , "batch_norm" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
a_ = key[key.find("linear_c" ) + len("linear_c" )]
a_ = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(idx )-1}''' )
if key.startswith("head" ):
a_ = key.replace("head" , "classifier" )
a_ = value
return new_state_dict
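# Hand-traced examples of the rename rules above (assuming encoder_only=False), for orientation:
#   "backbone.patch_embed1.proj.weight" -> "segformer.encoder.patch_embeddings.0.proj.weight"
#   "backbone.block1.0.attn.q.weight"   -> "segformer.encoder.block.0.0.attention.self.query.weight"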
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
a_ = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.weight''' )
a_ = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.bias''' )
# next, add keys and values (in that order) to the state dict
a_ = kv_weight[
: config.hidden_sizes[i], :
]
a_ = kv_bias[: config.hidden_sizes[i]]
a_ = kv_weight[
config.hidden_sizes[i] :, :
]
a_ = kv_bias[
config.hidden_sizes[i] :
]
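# Hedged sketch (toy sizes, not the real checkpoint): the split performed above. The original
# weights store key and value projections stacked in one (2 * hidden, hidden) matrix; the first
# `hidden` rows become the key weight and the remaining rows the value weight.
import torch

hidden = 4
kv_weight = torch.arange(2 * hidden * hidden, dtype=torch.float32).reshape(2 * hidden, hidden)
key_weight = kv_weight[:hidden, :]    # -> attention.self.key.weight
value_weight = kv_weight[hidden:, :]  # -> attention.self.value.weight
assert key_weight.shape == value_weight.shape == (hidden, hidden)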
def UpperCamelCase ( ) ->List[Any]:
"""simple docstring"""
a_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
a_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
return image
@torch.no_grad()
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Tuple:
"""simple docstring"""
a_ = SegformerConfig()
a_ = False
# set attributes based on model_name
a_ = "huggingface/label-files"
if "segformer" in model_name:
a_ = model_name[len("segformer." ) : len("segformer." ) + 2]
if "ade" in model_name:
a_ = 150
a_ = "ade20k-id2label.json"
a_ = (1, 150, 128, 128)
elif "city" in model_name:
a_ = 19
a_ = "cityscapes-id2label.json"
a_ = (1, 19, 128, 128)
else:
raise ValueError(F'''Model {model_name} not supported''' )
elif "mit" in model_name:
a_ = True
a_ = model_name[4:6]
a_ = 1_000
a_ = "imagenet-1k-id2label.json"
a_ = (1, 1_000)
else:
raise ValueError(F'''Model {model_name} not supported''' )
# set config attributes
a_ = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type="dataset" ) , "r" ) )
a_ = {int(k ): v for k, v in idalabel.items()}
a_ = idalabel
a_ = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
a_ = [64, 128, 320, 512]
a_ = 256
elif size == "b2":
a_ = [64, 128, 320, 512]
a_ = 768
a_ = [3, 4, 6, 3]
elif size == "b3":
a_ = [64, 128, 320, 512]
a_ = 768
a_ = [3, 4, 18, 3]
elif size == "b4":
a_ = [64, 128, 320, 512]
a_ = 768
a_ = [3, 8, 27, 3]
elif size == "b5":
a_ = [64, 128, 320, 512]
a_ = 768
a_ = [3, 6, 40, 3]
else:
raise ValueError(F'''Size {size} not supported''' )
# load image processor (only resize + normalize)
a_ = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=UpperCAmelCase , align=UpperCAmelCase , do_random_crop=UpperCAmelCase )
# prepare image
a_ = prepare_img()
a_ = image_processor(images=UpperCAmelCase , return_tensors="pt" ).pixel_values
logger.info(F'''Converting model {model_name}...''' )
# load original state dict
if encoder_only:
a_ = torch.load(UpperCAmelCase , map_location=torch.device("cpu" ) )
else:
a_ = torch.load(UpperCAmelCase , map_location=torch.device("cpu" ) )["state_dict"]
# rename keys
a_ = rename_keys(UpperCAmelCase , encoder_only=UpperCAmelCase )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(UpperCAmelCase , UpperCAmelCase )
# create HuggingFace model and load state dict
if encoder_only:
a_ = False
a_ = SegformerForImageClassification(UpperCAmelCase )
else:
a_ = SegformerForSemanticSegmentation(UpperCAmelCase )
model.load_state_dict(UpperCAmelCase )
model.eval()
# forward pass
a_ = model(UpperCAmelCase )
a_ = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
a_ = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
a_ = torch.tensor(
[
[[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
[[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
[[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
a_ = torch.tensor(
[
[[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
[[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
[[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
a_ = torch.tensor(
[
[[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
[[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
[[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
a_ = torch.tensor(
[
[[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
[[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
[[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
a_ = torch.tensor(
[
[[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
[[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
[[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
a_ = torch.tensor(
[
[[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
[[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
[[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
a_ = torch.tensor(
[
[[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
[[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
[[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
a_ = torch.tensor(
[
[
[-1.1372E01, -1.2787E01, -1.3477E01],
[-1.2536E01, -1.4194E01, -1.4409E01],
[-1.3217E01, -1.4888E01, -1.5327E01],
],
[
[-1.4791E01, -1.7122E01, -1.8277E01],
[-1.7163E01, -1.9192E01, -1.9533E01],
[-1.7897E01, -1.9991E01, -2.0315E01],
],
[
[7.6723E-01, 4.1921E-01, -7.7878E-02],
[4.7772E-01, 9.5557E-03, -2.8082E-01],
[3.6032E-01, -2.4826E-01, -5.1168E-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
a_ = torch.tensor(
[
[[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
[[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
[[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
a_ = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
a_ = torch.tensor(
[
[[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
[[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
[[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
a_ = torch.tensor(
[
[[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
[[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
[[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
a_ = torch.tensor(
[
[[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
[[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
[[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
a_ = torch.tensor(
[
[[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
[[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
[[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
] )
else:
a_ = logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , UpperCAmelCase , atol=1E-2 )
# finally, save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase )
model.save_pretrained(UpperCAmelCase )
image_processor.save_pretrained(UpperCAmelCase )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='segformer.b0.512x512.ade.160k',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
UpperCamelCase_ = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCamelCase_ = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class snake_case ( unittest.TestCase , SCREAMING_SNAKE_CASE_ ):
def UpperCAmelCase__ ( self) ->Optional[Any]:
a_ = load_tool("text-to-speech")
self.tool.setup()
def UpperCAmelCase__ ( self) ->Tuple:
# SpeechT5 isn't deterministic
torch.manual_seed(0)
a_ = self.tool("hey")
a_ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485]) , ))
def UpperCAmelCase__ ( self) ->List[Any]:
# SpeechT5 isn't deterministic
torch.manual_seed(0)
a_ = self.tool("hey")
a_ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485]) , ))
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
a_ = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
# BLEU scores as follows:
# "pair": [fairseq, transformers]
a_ = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
a_ = F'''{src_lang}-{tgt_lang}'''
a_ = F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
model_card_dir.mkdir(parents=UpperCAmelCase , exist_ok=UpperCAmelCase )
a_ = os.path.join(UpperCAmelCase , "README.md" )
print(F'''Generating {path}''' )
with open(UpperCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(UpperCAmelCase )
# make sure we are under the root of the project
UpperCamelCase_ = Path(__file__).resolve().parent.parent.parent
UpperCamelCase_ = repo_dir / 'model_cards'
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
UpperCamelCase_ = model_cards_dir / 'allenai' / model_name
write_model_card(model_card_dir, src_lang='en', tgt_lang='de', model_name=model_name)
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
UpperCamelCase_ = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Optional[int]:
"""simple docstring"""
for attribute in key.split("." ):
a_ = getattr(UpperCAmelCase , UpperCAmelCase )
if weight_type is not None:
a_ = getattr(UpperCAmelCase , UpperCAmelCase ).shape
else:
a_ = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
a_ = value
elif weight_type == "weight_g":
a_ = value
elif weight_type == "weight_v":
a_ = value
elif weight_type == "bias":
a_ = value
else:
a_ = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->List[str]:
"""simple docstring"""
a_ = []
a_ = fairseq_model.state_dict()
a_ = hf_model.feature_extractor
a_ = hf_model.adapter
for name, value in fairseq_dict.items():
a_ = False
if "conv_layers" in name:
load_conv_layer(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , hf_model.config.feat_extract_norm == "group" , )
a_ = True
elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."] ):
load_adapter(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
a_ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
a_ = True
if "*" in mapped_key:
a_ = name.split(UpperCAmelCase )[0].split("." )[-2]
a_ = mapped_key.replace("*" , UpperCAmelCase )
if "weight_g" in name:
a_ = "weight_g"
elif "weight_v" in name:
a_ = "weight_v"
elif "bias" in name:
a_ = "bias"
elif "weight" in name:
a_ = "weight"
else:
a_ = None
set_recursively(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
continue
if not is_used:
unused_weights.append(UpperCAmelCase )
logger.warning(F'''Unused weights: {unused_weights}''' )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
a_ = full_name.split("conv_layers." )[-1]
a_ = name.split("." )
a_ = int(items[0] )
a_ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
a_ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
a_ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
a_ = value
logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
a_ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(UpperCAmelCase )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->str:
"""simple docstring"""
a_ = full_name.split("adaptor." )[-1]
a_ = name.split("." )
if items[1].isdigit():
a_ = int(items[1] )
else:
a_ = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'''
a_ = value
logger.info(F'''Adapter proj layer norm bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'''
a_ = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'''
a_ = value
logger.info(F'''Adapter proj layer bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'''
a_ = value
logger.info(F'''Adapter proj layer weight was initialized from {full_name}.''' )
elif isinstance(UpperCAmelCase , UpperCAmelCase ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'''
a_ = value
logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'''
a_ = value
logger.info(F'''Adapter layer {layer_id} weight was initialized from {full_name}.''' )
else:
unused_weights.append(UpperCAmelCase )
def UpperCamelCase ( UpperCAmelCase ) ->Dict:
"""simple docstring"""
a_ , a_ = emb.weight.shape
a_ = nn.Linear(UpperCAmelCase , UpperCAmelCase , bias=UpperCAmelCase )
a_ = emb.weight.data
return lin_layer
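# Hedged usage sketch for the helper above (toy sizes, not the real mBART vocab): it turns the
# decoder embedding matrix into a weight-tied output projection, so vocabulary logits reuse the
# embedding weights. The direct Linear construction here is equivalent to the weight assignment
# the helper performs.
import torch
from torch import nn

toy_emb = nn.Embedding(10, 4)                   # vocab_size=10, hidden=4
toy_lm_head = nn.Linear(4, 10, bias=False)      # hidden -> vocab logits
toy_lm_head.weight.data = toy_emb.weight.data   # tie the projection to the embedding matrix
print(toy_lm_head(torch.randn(2, 4)).shape)     # torch.Size([2, 10])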
@torch.no_grad()
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) ->List[str]:
"""simple docstring"""
a_ = WavaVecaConfig.from_pretrained(
UpperCAmelCase , add_adapter=UpperCAmelCase , adapter_stride=UpperCAmelCase , adapter_kernel_size=UpperCAmelCase , use_auth_token=UpperCAmelCase , output_hidden_size=UpperCAmelCase , )
a_ = MBartConfig.from_pretrained(UpperCAmelCase )
# load model
a_ , a_ , a_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
"config_yaml": config_yaml_path,
"data": "/".join(dict_path.split("/" )[:-1] ),
"w2v_path": checkpoint_path,
"load_pretrained_decoder_from": None,
} , )
a_ = model[0].eval()
# load feature extractor
a_ = WavaVecaFeatureExtractor.from_pretrained(UpperCAmelCase , use_auth_token=UpperCAmelCase )
# set weights for wav2vec2 encoder
a_ = WavaVecaModel(UpperCAmelCase )
recursively_load_weights_wavaveca(model.encoder , UpperCAmelCase )
# load decoder weights
a_ = MBartForCausalLM(UpperCAmelCase )
a_ , a_ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=UpperCAmelCase )
logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
a_ = SpeechEncoderDecoderModel(encoder=UpperCAmelCase , decoder=UpperCAmelCase )
a_ = False
a_ = MBartaaTokenizer(UpperCAmelCase )
tokenizer.save_pretrained(UpperCAmelCase )
a_ = hf_wavavec.config.to_dict()
a_ = tokenizer.pad_token_id
a_ = tokenizer.bos_token_id
a_ = tokenizer.eos_token_id
a_ = "mbart50"
a_ = "wav2vec2"
a_ = tokenizer.eos_token_id
a_ = 250_004
a_ = tokenizer.eos_token_id
a_ = SpeechEncoderDecoderConfig.from_dict(UpperCAmelCase )
hf_wavavec.save_pretrained(UpperCAmelCase )
feature_extractor.save_pretrained(UpperCAmelCase )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_yaml_path', default=None, type=str, help='Path to yaml file of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-xls-r-1b',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/mbart-large-50-one-to-many-mmt',
type=str,
help='Path to hf decoder checkpoint config',
)
    parser.add_argument('--add_adapter', default=True, type=bool, help='whether to add model adapter layers')
parser.add_argument('--adapter_stride', default=2, type=int, help='stride of adapter layers')
parser.add_argument('--adapter_kernel_size', default=3, type=int, help='kernel size of adapter layers')
parser.add_argument('--encoder_output_dim', default=1024, type=int, help='encoder output dim')
parser.add_argument('--start_token_id', default=250004, type=int, help='`decoder_start_token_id` of model config')
UpperCamelCase_ = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 303
|
"""simple docstring"""
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(UpperCAmelCase , n - 1 , UpperCAmelCase ) * a) % mod
else:
        a_ = binary_exponentiation(UpperCAmelCase , n // 2 , UpperCAmelCase )
return (b * b) % mod
# a prime number
UpperCamelCase_ = 701
UpperCamelCase_ = 1000000000
UpperCamelCase_ = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
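# Hedged sanity check (assumption: this block is not part of the original script).
# Python's built-in three-argument pow performs the same O(log n) modular
# exponentiation, so the Fermat-inverse identity used above can be verified
# independently: for the prime p = 701 and b = 10, b * b**(p - 2) is congruent to
# 1 modulo p.
print((10 * pow(10, 701 - 2, 701)) % 701 == 1) # expected: True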
| 303
| 1
|
"""simple docstring"""
def UpperCamelCase ( UpperCAmelCase = 1_000_000 ) ->int:
"""simple docstring"""
a_ = 1
a_ = 1
a_ = {1: 1}
for inputa in range(2 , UpperCAmelCase ):
a_ = 0
a_ = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
a_ = (3 * number) + 1
counter += 1
if inputa not in counters:
a_ = counter
if counter > pre_counter:
a_ = inputa
a_ = counter
return largest_number
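# Hedged worked example (not part of the original module): for a starting value of
# 13 the Collatz chain is 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1,
# i.e. 10 terms, so the memo ends up holding a chain length of 10 for 13.  The
# memoisation is what keeps the sweep up to 1_000_000 fast: each chain stops as
# soon as it reaches a value whose length is already cached.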
if __name__ == "__main__":
print(solution(int(input().strip())))
| 303
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase) ->None:
warnings.warn(
"The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use YolosImageProcessor instead." , __UpperCAmelCase , )
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase)
| 303
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : Optional[int] = """speech_to_text_2"""
a_ : Tuple = ["""past_key_values"""]
a_ : List[str] = {"""num_attention_heads""": """decoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , __UpperCAmelCase=1_00_00 , __UpperCAmelCase=6 , __UpperCAmelCase=20_48 , __UpperCAmelCase=4 , __UpperCAmelCase=0.0 , __UpperCAmelCase=True , __UpperCAmelCase="relu" , __UpperCAmelCase=2_56 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase=10_24 , **__UpperCAmelCase , ) ->Any:
a_ = vocab_size
a_ = d_model
a_ = decoder_ffn_dim
a_ = decoder_layers
a_ = decoder_attention_heads
a_ = dropout
a_ = attention_dropout
a_ = activation_dropout
a_ = activation_function
a_ = init_std
a_ = decoder_layerdrop
a_ = use_cache
a_ = decoder_layers
a_ = scale_embedding # scale factor will be sqrt(d_model) if True
a_ = max_target_positions
super().__init__(
pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , decoder_start_token_id=__UpperCAmelCase , **__UpperCAmelCase , )
| 303
|
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self) ->Dict:
a_ = inspect.getfile(accelerate.test_utils)
a_ = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
a_ = os.path.sep.join(
mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"])
a_ = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])
@require_multi_gpu
def UpperCAmelCase__ ( self) ->Any:
print(F'''Found {torch.cuda.device_count()} devices.''')
a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy())
@require_multi_gpu
def UpperCAmelCase__ ( self) ->str:
print(F'''Found {torch.cuda.device_count()} devices.''')
a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
print(F'''Command: {cmd}''')
with patch_environment(omp_num_threads=1):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy())
@require_multi_gpu
def UpperCAmelCase__ ( self) ->Optional[int]:
a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__)]
with patch_environment(omp_num_threads=1):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy())
@require_multi_gpu
def UpperCAmelCase__ ( self) ->List[Any]:
print(F'''Found {torch.cuda.device_count()} devices, using 2 devices only''')
a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1"):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy())
if __name__ == "__main__":
UpperCamelCase_ = Accelerator()
UpperCamelCase_ = (accelerator.state.process_index + 2, 10)
UpperCamelCase_ = torch.randint(0, 10, shape).to(accelerator.device)
UpperCamelCase_ = ''
UpperCamelCase_ = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
UpperCamelCase_ = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
UpperCamelCase_ = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 303
| 1
|
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
UpperCamelCase_ = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def UpperCamelCase ( UpperCAmelCase ) ->Optional[Any]:
"""simple docstring"""
for pegasus_name, hf_name in PATTERNS:
a_ = k.replace(UpperCAmelCase , UpperCAmelCase )
return k
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->PegasusForConditionalGeneration:
"""simple docstring"""
a_ = DEFAULTS.copy()
cfg_kwargs.update(UpperCAmelCase )
a_ = PegasusConfig(**UpperCAmelCase )
a_ = PegasusForConditionalGeneration(UpperCAmelCase )
a_ = torch_model.model.state_dict()
a_ = {}
for k, v in tf_weights.items():
a_ = rename_state_dict_key(UpperCAmelCase )
if new_k not in sd:
raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''' )
if "dense" in k or "proj" in new_k:
a_ = v.T
a_ = torch.tensor(UpperCAmelCase , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, F'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}'''
# make sure embedding.padding_idx is respected
a_ = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1] )
a_ = mapping["shared.weight"]
a_ = mapping["shared.weight"]
a_ = {k: torch.zeros_like(UpperCAmelCase ) for k, v in sd.items() if k.endswith("bias" ) and k not in mapping}
mapping.update(**UpperCAmelCase )
a_ , a_ = torch_model.model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase )
a_ = [
k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
]
assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], F'''no matches found for the following tf keys {extra}'''
return torch_model
def UpperCamelCase ( UpperCAmelCase="./ckpt/aeslc/model.ckpt-32000" ) ->Dict:
"""simple docstring"""
a_ = tf.train.list_variables(UpperCAmelCase )
a_ = {}
a_ = ["Adafactor", "global_step"]
for name, shape in tqdm(UpperCAmelCase , desc="converting tf checkpoint to dict" ):
a_ = any(pat in name for pat in ignore_name )
if skip_key:
continue
a_ = tf.train.load_variable(UpperCAmelCase , UpperCAmelCase )
a_ = array
return tf_weights
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
a_ = Path(UpperCAmelCase ).parent.name
a_ = task_specific_params[F'''summarization_{dataset}''']["max_position_embeddings"]
a_ = PegasusTokenizer.from_pretrained("sshleifer/pegasus" , model_max_length=UpperCAmelCase )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(UpperCAmelCase )
# convert model
a_ = get_tf_weights_as_numpy(UpperCAmelCase )
a_ = task_specific_params[F'''summarization_{dataset}''']
if dataset == "large":
a_ = task_specific_params
a_ = convert_pegasus(UpperCAmelCase , UpperCAmelCase )
torch_model.save_pretrained(UpperCAmelCase )
a_ = torch_model.state_dict()
sd.pop("model.decoder.embed_positions.weight" )
sd.pop("model.encoder.embed_positions.weight" )
torch.save(UpperCAmelCase , Path(UpperCAmelCase ) / "pytorch_model.bin" )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.')
UpperCamelCase_ = parser.parse_args()
if args.save_dir is None:
UpperCamelCase_ = Path(args.tf_ckpt_path).parent.name
UpperCamelCase_ = os.path.join('pegasus', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 303
|
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) ->tuple[float | int, list[tuple[int, int]]]:
"""simple docstring"""
a_ , a_ = grid.shape
a_ = [-1, 1, 0, 0]
a_ = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
a_ , a_ = [(0, source)], set()
a_ = np.full((rows, cols) , np.inf )
a_ = 0
a_ = np.empty((rows, cols) , dtype=UpperCAmelCase )
a_ = None
while queue:
((a_) , (a_)) = heappop(UpperCAmelCase )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
a_ = []
while (x, y) != source:
path.append((x, y) )
a_ , a_ = predecessors[x, y]
path.append(UpperCAmelCase ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(UpperCAmelCase ) ):
a_ , a_ = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
a_ = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(UpperCAmelCase , (dist + 1, (nx, ny)) )
a_ = dist + 1
a_ = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
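    # Hedged worked example (not part of the original module): on a 3x3 grid of
    # ones with allow_diagonal=False, the cheapest route from (0, 0) to (2, 2)
    # costs 4 unit steps, e.g. (0, 0) -> (1, 0) -> (2, 0) -> (2, 1) -> (2, 2);
    # with allow_diagonal=True the cost drops to 2 by cutting through (1, 1).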
| 303
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def UpperCAmelCase__ ( self) ->int:
a_ = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(__UpperCAmelCase , "hidden_sizes"))
self.parent.assertTrue(hasattr(__UpperCAmelCase , "num_attention_heads"))
self.parent.assertTrue(hasattr(__UpperCAmelCase , "num_encoder_blocks"))
class snake_case :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=64 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=[2, 2, 2, 2] , __UpperCAmelCase=[8, 4, 2, 1] , __UpperCAmelCase=[16, 32, 64, 1_28] , __UpperCAmelCase=[1, 4, 8, 16] , __UpperCAmelCase=[1, 2, 4, 8] , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=None , ) ->str:
a_ = parent
a_ = batch_size
a_ = image_size
a_ = num_channels
a_ = num_encoder_blocks
a_ = sr_ratios
a_ = depths
a_ = hidden_sizes
a_ = downsampling_rates
a_ = num_attention_heads
a_ = is_training
a_ = use_labels
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = initializer_range
a_ = num_labels
a_ = scope
def UpperCAmelCase__ ( self) ->List[str]:
a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels)
a_ = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self) ->Optional[Any]:
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Any:
a_ = SegformerModel(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase)
a_ = a_ = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->int:
a_ = self.num_labels
a_ = SegformerForSemanticSegmentation(__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
a_ = model(__UpperCAmelCase , labels=__UpperCAmelCase)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
self.parent.assertGreater(result.loss , 0.0)
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Union[str, Any]:
a_ = 1
a_ = SegformerForSemanticSegmentation(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size)).to(__UpperCAmelCase)
a_ = model(__UpperCAmelCase , labels=__UpperCAmelCase)
self.parent.assertGreater(result.loss , 0.0)
def UpperCAmelCase__ ( self) ->Union[str, Any]:
a_ = self.prepare_config_and_inputs()
a_ , a_ , a_ = config_and_inputs
a_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : str = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
a_ : Any = (
{
"""feature-extraction""": SegformerModel,
"""image-classification""": SegformerForImageClassification,
"""image-segmentation""": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
a_ : Optional[int] = True
a_ : List[Any] = False
a_ : int = False
a_ : Dict = False
def UpperCAmelCase__ ( self) ->int:
a_ = SegformerModelTester(self)
a_ = SegformerConfigTester(self , config_class=__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->str:
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self) ->List[str]:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Dict:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*__UpperCAmelCase)
@unittest.skip("SegFormer does not use inputs_embeds")
def UpperCAmelCase__ ( self) ->Optional[int]:
pass
@unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
def UpperCAmelCase__ ( self) ->str:
pass
def UpperCAmelCase__ ( self) ->Any:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(__UpperCAmelCase)
a_ = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ = [*signature.parameters.keys()]
a_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Tuple:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = True
for model_class in self.all_model_classes:
a_ = True
a_ = False
a_ = True
a_ = model_class(__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
with torch.no_grad():
a_ = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase))
a_ = outputs.attentions
a_ = sum(self.model_tester.depths)
self.assertEqual(len(__UpperCAmelCase) , __UpperCAmelCase)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
a_ = True
a_ = model_class(__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
with torch.no_grad():
a_ = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase))
a_ = outputs.attentions
self.assertEqual(len(__UpperCAmelCase) , __UpperCAmelCase)
# verify the first attentions (first block, first layer)
a_ = (self.model_tester.image_size // 4) ** 2
a_ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
a_ = (self.model_tester.image_size // 32) ** 2
a_ = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:]) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
a_ = len(__UpperCAmelCase)
# Check attention is always last and order is fine
a_ = True
a_ = True
a_ = model_class(__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
with torch.no_grad():
a_ = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase))
self.assertEqual(out_len + 1 , len(__UpperCAmelCase))
a_ = outputs.attentions
self.assertEqual(len(__UpperCAmelCase) , __UpperCAmelCase)
# verify the first attentions (first block, first layer)
a_ = (self.model_tester.image_size // 4) ** 2
a_ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def UpperCAmelCase__ ( self) ->Optional[Any]:
def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase):
a_ = model_class(__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
with torch.no_grad():
a_ = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase))
a_ = outputs.hidden_states
a_ = self.model_tester.num_encoder_blocks
self.assertEqual(len(__UpperCAmelCase) , __UpperCAmelCase)
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:]) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a_ = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[Any]:
if not self.model_tester.is_training:
return
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = True
for model_class in self.all_model_classes:
if model_class in get_values(__UpperCAmelCase):
continue
a_ = model_class(__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.train()
a_ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase)
a_ = model(**__UpperCAmelCase).loss
loss.backward()
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
def UpperCAmelCase__ ( self) ->List[Any]:
pass
@slow
def UpperCAmelCase__ ( self) ->str:
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = SegformerModel.from_pretrained(__UpperCAmelCase)
self.assertIsNotNone(__UpperCAmelCase)
def UpperCamelCase ( ) ->List[Any]:
"""simple docstring"""
a_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
class snake_case ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self) ->Dict:
# only resize + normalize
a_ = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=__UpperCAmelCase , align=__UpperCAmelCase , do_random_crop=__UpperCAmelCase)
a_ = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
__UpperCAmelCase)
a_ = prepare_img()
a_ = image_processor(images=__UpperCAmelCase , return_tensors="pt")
a_ = encoded_inputs.pixel_values.to(__UpperCAmelCase)
with torch.no_grad():
a_ = model(__UpperCAmelCase)
a_ = torch.Size((1, model.config.num_labels, 1_28, 1_28))
self.assertEqual(outputs.logits.shape , __UpperCAmelCase)
a_ = torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
]).to(__UpperCAmelCase)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , __UpperCAmelCase , atol=1E-4))
@slow
def UpperCAmelCase__ ( self) ->List[Any]:
# only resize + normalize
a_ = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=__UpperCAmelCase , align=__UpperCAmelCase , do_random_crop=__UpperCAmelCase)
a_ = SegformerForSemanticSegmentation.from_pretrained(
"nvidia/segformer-b1-finetuned-cityscapes-1024-1024").to(__UpperCAmelCase)
a_ = prepare_img()
a_ = image_processor(images=__UpperCAmelCase , return_tensors="pt")
a_ = encoded_inputs.pixel_values.to(__UpperCAmelCase)
with torch.no_grad():
a_ = model(__UpperCAmelCase)
a_ = torch.Size((1, model.config.num_labels, 1_28, 1_28))
self.assertEqual(outputs.logits.shape , __UpperCAmelCase)
a_ = torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
]).to(__UpperCAmelCase)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , __UpperCAmelCase , atol=1E-1))
@slow
def UpperCAmelCase__ ( self) ->Dict:
# only resize + normalize
a_ = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=__UpperCAmelCase , align=__UpperCAmelCase , do_random_crop=__UpperCAmelCase)
a_ = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
__UpperCAmelCase)
a_ = prepare_img()
a_ = image_processor(images=__UpperCAmelCase , return_tensors="pt")
a_ = encoded_inputs.pixel_values.to(__UpperCAmelCase)
with torch.no_grad():
a_ = model(__UpperCAmelCase)
a_ = outputs.logits.detach().cpu()
a_ = image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase , target_sizes=[(5_00, 3_00)])
a_ = torch.Size((5_00, 3_00))
self.assertEqual(segmentation[0].shape , __UpperCAmelCase)
a_ = image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase)
a_ = torch.Size((1_28, 1_28))
self.assertEqual(segmentation[0].shape , __UpperCAmelCase)
| 303
|
"""simple docstring"""
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
UpperCamelCase_ = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
UpperCamelCase_ = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class snake_case :
def __init__( self) ->Optional[int]:
a_ = WATERMARK_BITS
a_ = WatermarkEncoder()
self.encoder.set_watermark("bits" , self.watermark)
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Optional[int]:
# can't encode images that are smaller than 256
if images.shape[-1] < 2_56:
return images
a_ = (2_55 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1).float().numpy()
a_ = [self.encoder.encode(__UpperCAmelCase , "dwtDct") for image in images]
a_ = torch.from_numpy(np.array(__UpperCAmelCase)).permute(0 , 3 , 1 , 2)
a_ = torch.clamp(2 * (images / 2_55 - 0.5) , min=-1.0 , max=1.0)
return images
| 303
| 1
|
"""simple docstring"""
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase="attention" ) ->List[Any]:
"""simple docstring"""
a_ = params[F'''{prefix}/layers_{i}/{layer_name}/key/kernel''']
a_ = params[F'''{prefix}/layers_{i}/{layer_name}/out/kernel''']
a_ = params[F'''{prefix}/layers_{i}/{layer_name}/query/kernel''']
a_ = params[F'''{prefix}/layers_{i}/{layer_name}/value/kernel''']
return k, o, q, v
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) ->List[str]:
"""simple docstring"""
if split_mlp_wi:
a_ = params[F'''{prefix}/layers_{i}/mlp/wi_0/kernel''']
a_ = params[F'''{prefix}/layers_{i}/mlp/wi_1/kernel''']
a_ = (wi_a, wi_a)
else:
a_ = params[F'''{prefix}/layers_{i}/mlp/wi/kernel''']
a_ = params[F'''{prefix}/layers_{i}/mlp/wo/kernel''']
return wi, wo
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
return params[F'''{prefix}/layers_{i}/{layer_name}/scale''']
def UpperCamelCase ( UpperCAmelCase , *, UpperCAmelCase , UpperCAmelCase ) ->str:
"""simple docstring"""
a_ = traverse_util.flatten_dict(variables["target"] )
a_ = {"/".join(UpperCAmelCase ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
a_ = "encoder/layers_0/mlp/wi_0/kernel" in old
print("Split MLP:" , UpperCAmelCase )
a_ = collections.OrderedDict()
# Shared embeddings.
a_ = old["token_embedder/embedding"]
# Encoder.
for i in range(UpperCAmelCase ):
# Block i, layer 0 (Self Attention).
a_ = tax_layer_norm_lookup(UpperCAmelCase , UpperCAmelCase , "encoder" , "pre_attention_layer_norm" )
a_ , a_ , a_ , a_ = tax_attention_lookup(UpperCAmelCase , UpperCAmelCase , "encoder" , "attention" )
a_ = layer_norm
a_ = k.T
a_ = o.T
a_ = q.T
a_ = v.T
# Block i, layer 1 (MLP).
a_ = tax_layer_norm_lookup(UpperCAmelCase , UpperCAmelCase , "encoder" , "pre_mlp_layer_norm" )
a_ , a_ = tax_mlp_lookup(UpperCAmelCase , UpperCAmelCase , "encoder" , UpperCAmelCase )
a_ = layer_norm
if split_mlp_wi:
a_ = wi[0].T
a_ = wi[1].T
else:
a_ = wi.T
a_ = wo.T
a_ = old[
"encoder/relpos_bias/rel_embedding"
].T
a_ = old["encoder/encoder_norm/scale"]
if not is_encoder_only:
# Decoder.
for i in range(UpperCAmelCase ):
# Block i, layer 0 (Self Attention).
a_ = tax_layer_norm_lookup(UpperCAmelCase , UpperCAmelCase , "decoder" , "pre_self_attention_layer_norm" )
a_ , a_ , a_ , a_ = tax_attention_lookup(UpperCAmelCase , UpperCAmelCase , "decoder" , "self_attention" )
a_ = layer_norm
a_ = k.T
a_ = o.T
a_ = q.T
a_ = v.T
# Block i, layer 1 (Cross Attention).
a_ = tax_layer_norm_lookup(UpperCAmelCase , UpperCAmelCase , "decoder" , "pre_cross_attention_layer_norm" )
a_ , a_ , a_ , a_ = tax_attention_lookup(UpperCAmelCase , UpperCAmelCase , "decoder" , "encoder_decoder_attention" )
a_ = layer_norm
a_ = k.T
a_ = o.T
a_ = q.T
a_ = v.T
# Block i, layer 2 (MLP).
a_ = tax_layer_norm_lookup(UpperCAmelCase , UpperCAmelCase , "decoder" , "pre_mlp_layer_norm" )
a_ , a_ = tax_mlp_lookup(UpperCAmelCase , UpperCAmelCase , "decoder" , UpperCAmelCase )
a_ = layer_norm
if split_mlp_wi:
a_ = wi[0].T
a_ = wi[1].T
else:
a_ = wi.T
a_ = wo.T
a_ = old["decoder/decoder_norm/scale"]
a_ = old[
"decoder/relpos_bias/rel_embedding"
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
a_ = old["decoder/logits_dense/kernel"].T
return new
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->Any:
"""simple docstring"""
a_ = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
a_ = state_dict["shared.weight"]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
a_ = state_dict["shared.weight"]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("Using shared word embeddings as lm_head." )
a_ = state_dict["shared.weight"]
return state_dict
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
a_ = checkpoints.load_tax_checkpoint(UpperCAmelCase )
a_ = convert_tax_to_pytorch(UpperCAmelCase , num_layers=config.num_layers , is_encoder_only=UpperCAmelCase )
a_ = make_state_dict(UpperCAmelCase , UpperCAmelCase )
model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = False ) ->int:
"""simple docstring"""
a_ = TaConfig.from_json_file(UpperCAmelCase )
print(F'''Building PyTorch model from configuration: {config}''' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
a_ = TaEncoderModel(UpperCAmelCase )
else:
a_ = TaForConditionalGeneration(UpperCAmelCase )
# Load weights from tf checkpoint
load_tax_weights_in_ta(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(UpperCAmelCase )
# Verify that we can load the checkpoint.
model.from_pretrained(UpperCAmelCase )
print("Done" )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
    '--is_encoder_only', action='store_true', help='Set this flag if the checkpoint is an encoder-only model (no decoder).', default=False
)
UpperCamelCase_ = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
    args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
| 303
|
"""simple docstring"""
import math
UpperCamelCase_ = 10
UpperCamelCase_ = 7
UpperCamelCase_ = BALLS_PER_COLOUR * NUM_COLOURS
def UpperCamelCase ( UpperCAmelCase = 20 ) ->str:
"""simple docstring"""
a_ = math.comb(UpperCAmelCase , UpperCAmelCase )
a_ = math.comb(NUM_BALLS - BALLS_PER_COLOUR , UpperCAmelCase )
a_ = NUM_COLOURS * (1 - missing_colour / total)
return F'''{result:.9f}'''
if __name__ == "__main__":
print(solution(20))
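    # Hedged Monte Carlo cross-check (assumption: this block is not part of the
    # original script).  With 7 colours of 10 balls each, drawing 20 balls at
    # random and counting the distinct colours seen should approach the
    # analytical expectation computed above.
    import random
    _balls = [colour for colour in range(7) for _ in range(10)]
    _trials = 20_000
    _seen = sum(len(set(random.sample(_balls, 20))) for _ in range(_trials))
    print(F'''Monte Carlo estimate: {_seen / _trials:.3f}''')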
| 303
| 1
|
"""simple docstring"""
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
UpperCamelCase_ = pytest.mark.integration
@pytest.mark.parametrize("path" , ["paws", "csv"] )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->Any:
"""simple docstring"""
inspect_dataset(UpperCAmelCase , UpperCAmelCase )
a_ = path + ".py"
assert script_name in os.listdir(UpperCAmelCase )
assert "__pycache__" not in os.listdir(UpperCAmelCase )
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.parametrize("path" , ["accuracy"] )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
inspect_metric(UpperCAmelCase , UpperCAmelCase )
a_ = path + ".py"
assert script_name in os.listdir(UpperCAmelCase )
assert "__pycache__" not in os.listdir(UpperCAmelCase )
@pytest.mark.parametrize(
"path, config_name, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Any:
"""simple docstring"""
a_ = get_dataset_config_info(UpperCAmelCase , config_name=UpperCAmelCase )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Dict:
"""simple docstring"""
with pytest.raises(UpperCAmelCase ):
get_dataset_config_info(UpperCAmelCase , config_name=UpperCAmelCase )
@pytest.mark.parametrize(
"path, expected" , [
("squad", "plain_text"),
("acronym_identification", "default"),
("lhoestq/squad", "plain_text"),
("lhoestq/test", "default"),
("lhoestq/demo1", "lhoestq--demo1"),
("dalle-mini/wit", "dalle-mini--wit"),
] , )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->Dict:
"""simple docstring"""
a_ = get_dataset_config_names(UpperCAmelCase )
assert expected in config_names
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config" , [
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
] , )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
a_ = get_dataset_infos(UpperCAmelCase )
assert list(infos.keys() ) == expected_configs
a_ = expected_configs[0]
assert expected_config in infos
a_ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Optional[int]:
"""simple docstring"""
a_ = get_dataset_infos(UpperCAmelCase )
assert expected_config in infos
a_ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
with pytest.raises(UpperCAmelCase ):
get_dataset_split_names(UpperCAmelCase , config_name=UpperCAmelCase )
| 303
|
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
UpperCamelCase_ = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def UpperCamelCase ( UpperCAmelCase ) ->Optional[Any]:
"""simple docstring"""
for pegasus_name, hf_name in PATTERNS:
a_ = k.replace(UpperCAmelCase , UpperCAmelCase )
return k
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->PegasusForConditionalGeneration:
"""simple docstring"""
a_ = DEFAULTS.copy()
cfg_kwargs.update(UpperCAmelCase )
a_ = PegasusConfig(**UpperCAmelCase )
a_ = PegasusForConditionalGeneration(UpperCAmelCase )
a_ = torch_model.model.state_dict()
a_ = {}
for k, v in tf_weights.items():
a_ = rename_state_dict_key(UpperCAmelCase )
if new_k not in sd:
raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''' )
if "dense" in k or "proj" in new_k:
a_ = v.T
a_ = torch.tensor(UpperCAmelCase , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, F'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}'''
# make sure embedding.padding_idx is respected
a_ = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1] )
a_ = mapping["shared.weight"]
a_ = mapping["shared.weight"]
a_ = {k: torch.zeros_like(UpperCAmelCase ) for k, v in sd.items() if k.endswith("bias" ) and k not in mapping}
mapping.update(**UpperCAmelCase )
a_ , a_ = torch_model.model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase )
a_ = [
k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
]
assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], F'''no matches found for the following tf keys {extra}'''
return torch_model
def UpperCamelCase ( UpperCAmelCase="./ckpt/aeslc/model.ckpt-32000" ) ->Dict:
"""simple docstring"""
a_ = tf.train.list_variables(UpperCAmelCase )
a_ = {}
a_ = ["Adafactor", "global_step"]
for name, shape in tqdm(UpperCAmelCase , desc="converting tf checkpoint to dict" ):
a_ = any(pat in name for pat in ignore_name )
if skip_key:
continue
a_ = tf.train.load_variable(UpperCAmelCase , UpperCAmelCase )
a_ = array
return tf_weights
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
a_ = Path(UpperCAmelCase ).parent.name
a_ = task_specific_params[F'''summarization_{dataset}''']["max_position_embeddings"]
a_ = PegasusTokenizer.from_pretrained("sshleifer/pegasus" , model_max_length=UpperCAmelCase )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(UpperCAmelCase )
# convert model
a_ = get_tf_weights_as_numpy(UpperCAmelCase )
a_ = task_specific_params[F'''summarization_{dataset}''']
if dataset == "large":
a_ = task_specific_params
a_ = convert_pegasus(UpperCAmelCase , UpperCAmelCase )
torch_model.save_pretrained(UpperCAmelCase )
a_ = torch_model.state_dict()
sd.pop("model.decoder.embed_positions.weight" )
sd.pop("model.encoder.embed_positions.weight" )
torch.save(UpperCAmelCase , Path(UpperCAmelCase ) / "pytorch_model.bin" )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.')
UpperCamelCase_ = parser.parse_args()
if args.save_dir is None:
UpperCamelCase_ = Path(args.tf_ckpt_path).parent.name
UpperCamelCase_ = os.path.join('pegasus', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 303
| 1
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'microsoft/unispeech-large-1500h-cv': (
'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : List[Any] = """unispeech"""
def __init__( self , __UpperCAmelCase=32 , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-5 , __UpperCAmelCase="group" , __UpperCAmelCase="gelu" , __UpperCAmelCase=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , __UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , __UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2) , __UpperCAmelCase=False , __UpperCAmelCase=1_28 , __UpperCAmelCase=16 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=0.05 , __UpperCAmelCase=10 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0 , __UpperCAmelCase=10 , __UpperCAmelCase=0 , __UpperCAmelCase=3_20 , __UpperCAmelCase=2 , __UpperCAmelCase=0.1 , __UpperCAmelCase=1_00 , __UpperCAmelCase=2_56 , __UpperCAmelCase=2_56 , __UpperCAmelCase=0.1 , __UpperCAmelCase="mean" , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=2_56 , __UpperCAmelCase=80 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=0.5 , **__UpperCAmelCase , ) ->List[Any]:
super().__init__(**__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase)
a_ = hidden_size
a_ = feat_extract_norm
a_ = feat_extract_activation
a_ = list(__UpperCAmelCase)
a_ = list(__UpperCAmelCase)
a_ = list(__UpperCAmelCase)
a_ = conv_bias
a_ = num_conv_pos_embeddings
a_ = num_conv_pos_embedding_groups
a_ = len(self.conv_dim)
a_ = num_hidden_layers
a_ = intermediate_size
a_ = hidden_act
a_ = num_attention_heads
a_ = hidden_dropout
a_ = attention_dropout
a_ = activation_dropout
a_ = feat_proj_dropout
a_ = final_dropout
a_ = layerdrop
a_ = layer_norm_eps
a_ = initializer_range
a_ = num_ctc_classes
a_ = vocab_size
a_ = do_stable_layer_norm
a_ = use_weighted_layer_sum
a_ = classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a_ = apply_spec_augment
a_ = mask_time_prob
a_ = mask_time_length
a_ = mask_time_min_masks
a_ = mask_feature_prob
a_ = mask_feature_length
a_ = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
a_ = num_codevectors_per_group
a_ = num_codevector_groups
a_ = contrastive_logits_temperature
a_ = feat_quantizer_dropout
a_ = num_negatives
a_ = codevector_dim
a_ = proj_codevector_dim
a_ = diversity_loss_weight
# ctc loss
a_ = ctc_loss_reduction
a_ = ctc_zero_infinity
# pretraining loss
a_ = replace_prob
@property
def UpperCAmelCase__ ( self) ->str:
return functools.reduce(operator.mul , self.conv_stride , 1)
| 303
|
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class snake_case :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=50 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=None , ) ->Dict:
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_mask
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = initializer_range
a_ = use_labels
a_ = scope
def UpperCAmelCase__ ( self) ->Any:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = None
if self.use_input_mask:
a_ = random_attention_mask([self.batch_size, self.seq_length])
if self.use_labels:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = self.get_config()
return config, input_ids, input_mask, token_labels
def UpperCAmelCase__ ( self) ->Optional[Any]:
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self) ->List[str]:
        a_ , a_ , a_ , a_ = self.prepare_config_and_inputs()
a_ = True
a_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
a_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->str:
a_ = BertGenerationEncoder(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase)
a_ = model(__UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->Union[str, Any]:
a_ = True
a_ = BertGenerationEncoder(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , )
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->List[str]:
a_ = True
a_ = True
a_ = BertGenerationDecoder(config=__UpperCAmelCase).to(__UpperCAmelCase).eval()
# first forward pass
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase , )
a_ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
a_ = ids_tensor((self.batch_size, 3) , config.vocab_size)
a_ = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append to next input_ids and
a_ = torch.cat([input_ids, next_tokens] , dim=-1)
a_ = torch.cat([input_mask, next_mask] , dim=-1)
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["hidden_states"][0]
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["hidden_states"][0]
# select random slice
a_ = ids_tensor((1,) , output_from_past.shape[-1]).item()
a_ = output_from_no_past[:, -3:, random_slice_idx].detach()
a_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , ) ->Tuple:
a_ = BertGenerationDecoder(__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase__ ( self) ->str:
a_ , a_ , a_ , a_ = self.prepare_config_and_inputs()
a_ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : List[str] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
a_ : Optional[int] = (BertGenerationDecoder,) if is_torch_available() else ()
a_ : List[Any] = (
{"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder}
if is_torch_available()
else {}
)
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = BertGenerationEncoderTester(self)
a_ = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37)
def UpperCAmelCase__ ( self) ->Optional[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self) ->Tuple:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Tuple:
a_ , a_ , a_ , a_ = self.model_tester.prepare_config_and_inputs()
a_ = "bert"
self.model_tester.create_and_check_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->int:
a_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[str]:
a_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Optional[int]:
# This regression test was failing with PyTorch < 1.3
        a_ , a_ , a_ , a_ , a_ , a_ = self.model_tester.prepare_config_and_inputs_for_decoder()
a_ = None
self.model_tester.create_and_check_model_as_decoder(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , )
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*__UpperCAmelCase)
@slow
def UpperCAmelCase__ ( self) ->str:
a_ = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
self.assertIsNotNone(__UpperCAmelCase)
@require_torch
class snake_case ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self) ->int:
a_ = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
a_ = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]])
with torch.no_grad():
a_ = model(__UpperCAmelCase)[0]
a_ = torch.Size([1, 8, 10_24])
self.assertEqual(output.shape , __UpperCAmelCase)
a_ = torch.tensor(
[[[0.1_775, 0.0_083, -0.0_321], [1.6_002, 0.1_287, 0.3_912], [2.1_473, 0.5_791, 0.6_066]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4))
@require_torch
class snake_case ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self) ->List[str]:
a_ = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
a_ = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]])
with torch.no_grad():
a_ = model(__UpperCAmelCase)[0]
a_ = torch.Size([1, 8, 5_03_58])
self.assertEqual(output.shape , __UpperCAmelCase)
a_ = torch.tensor(
[[[-0.5_788, -2.5_994, -3.7_054], [0.0_438, 4.7_997, 1.8_795], [1.5_862, 6.6_409, 4.4_638]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4))
| 303
| 1
|
"""simple docstring"""
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='%(message)s')
def UpperCamelCase ( UpperCAmelCase ) ->np.ndarray:
"""simple docstring"""
return input_array.reshape((input_array.size, 1) )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->np.ndarray:
"""simple docstring"""
a_ = np.nan
for i in range(UpperCAmelCase ):
a_ = features[:, labels == i]
a_ = data.mean(1 )
# Centralize the data of class i
a_ = data - column_reshape(UpperCAmelCase )
if i > 0:
            # covariance_sum has already been initialised in a previous iteration
covariance_sum += np.dot(UpperCAmelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
a_ = np.dot(UpperCAmelCase , centered_data.T )
return covariance_sum / features.shape[1]
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->np.ndarray:
"""simple docstring"""
a_ = features.mean(1 )
a_ = np.nan
for i in range(UpperCAmelCase ):
a_ = features[:, labels == i]
a_ = data.shape[1]
a_ = data.mean(1 )
if i > 0:
            # covariance_sum has already been initialised in a previous iteration
covariance_sum += device_data * np.dot(
column_reshape(UpperCAmelCase ) - column_reshape(UpperCAmelCase ) , (column_reshape(UpperCAmelCase ) - column_reshape(UpperCAmelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
a_ = device_data * np.dot(
column_reshape(UpperCAmelCase ) - column_reshape(UpperCAmelCase ) , (column_reshape(UpperCAmelCase ) - column_reshape(UpperCAmelCase )).T , )
return covariance_sum / features.shape[1]
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->np.ndarray:
"""simple docstring"""
if features.any():
a_ = features.mean(1 )
# Center the dataset
a_ = features - np.reshape(UpperCAmelCase , (data_mean.size, 1) )
a_ = np.dot(UpperCAmelCase , centered_data.T ) / features.shape[1]
a_ , a_ = np.linalg.eigh(UpperCAmelCase )
# Take all the columns in the reverse order (-1), and then takes only the first
a_ = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
a_ = np.dot(filtered_eigenvectors.T , UpperCAmelCase )
logging.info("Principal Component Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=UpperCAmelCase )
logging.error("Dataset empty" )
raise AssertionError
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->np.ndarray:
"""simple docstring"""
assert classes > dimensions
# Check if features have been already loaded
    if features.any():
a_ , a_ = eigh(
covariance_between_classes(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , covariance_within_classes(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , )
a_ = eigenvectors[:, ::-1][:, :dimensions]
a_ , a_ , a_ = np.linalg.svd(UpperCAmelCase )
a_ = svd_matrix[:, 0:dimensions]
a_ = np.dot(filtered_svd_matrix.T , UpperCAmelCase )
logging.info("Linear Discriminant Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=UpperCAmelCase )
logging.error("Dataset empty" )
raise AssertionError
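# Minimal usage sketch (not part of the original script), assuming the two functions above keep their
# original public names principal_component_analysis and linear_discriminant_analysis, which is how the
# tests below call them. Features are laid out as (n_features, n_samples):
#
#   features = np.array([[1.0, 2.0, 3.0, 4.0, 5.0],
#                        [2.0, 3.0, 4.0, 5.0, 6.0],
#                        [3.0, 4.0, 5.0, 6.0, 7.0]])
#   labels = np.array([0, 0, 0, 1, 1])
#   pca_projection = principal_component_analysis(features, 2)             # keep 2 principal components
#   lda_projection = linear_discriminant_analysis(features, labels, 2, 1)  # 2 classes, project to 1 dimension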
def UpperCamelCase ( ) ->None:
"""simple docstring"""
a_ = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
a_ = np.array([0, 0, 0, 1, 1] )
a_ = 2
a_ = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(UpperCAmelCase ) as error_info:
a_ = linear_discriminant_analysis(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
if isinstance(UpperCAmelCase , np.ndarray ):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes" )
assert error_info.type is AssertionError
def UpperCamelCase ( ) ->None:
"""simple docstring"""
a_ = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
a_ = 2
a_ = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]] )
with pytest.raises(UpperCAmelCase ) as error_info:
a_ = principal_component_analysis(UpperCAmelCase , UpperCAmelCase )
if not np.allclose(UpperCAmelCase , UpperCAmelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 303
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
def UpperCamelCase ( UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
if "resnet-50" in model_name:
a_ = ResNetConfig.from_pretrained("microsoft/resnet-50" )
elif "resnet-101" in model_name:
a_ = ResNetConfig.from_pretrained("microsoft/resnet-101" )
else:
        raise ValueError("Model name should include either resnet-50 or resnet-101" )
a_ = DetrConfig(use_timm_backbone=UpperCAmelCase , backbone_config=UpperCAmelCase )
# set label attributes
a_ = "panoptic" in model_name
if is_panoptic:
a_ = 250
else:
a_ = 91
a_ = "huggingface/label-files"
a_ = "coco-detection-id2label.json"
a_ = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type="dataset" ) , "r" ) )
    a_ = {int(k): v for k, v in idalabel.items()}
a_ = idalabel
a_ = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def UpperCamelCase ( UpperCAmelCase ) ->List[str]:
"""simple docstring"""
a_ = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") )
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") )
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") )
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") )
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
F'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
F'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
] )
return rename_keys
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
a_ = state_dict.pop(UpperCAmelCase )
a_ = val
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=False ) ->Optional[Any]:
"""simple docstring"""
a_ = ""
if is_panoptic:
a_ = "detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
a_ = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
a_ = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
a_ = in_proj_weight[:256, :]
a_ = in_proj_bias[:256]
a_ = in_proj_weight[256:512, :]
a_ = in_proj_bias[256:512]
a_ = in_proj_weight[-256:, :]
a_ = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
a_ = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
a_ = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
a_ = in_proj_weight[:256, :]
a_ = in_proj_bias[:256]
a_ = in_proj_weight[256:512, :]
a_ = in_proj_bias[256:512]
a_ = in_proj_weight[-256:, :]
a_ = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
a_ = state_dict.pop(
F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
a_ = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
a_ = in_proj_weight_cross_attn[:256, :]
a_ = in_proj_bias_cross_attn[:256]
a_ = in_proj_weight_cross_attn[256:512, :]
a_ = in_proj_bias_cross_attn[256:512]
a_ = in_proj_weight_cross_attn[-256:, :]
a_ = in_proj_bias_cross_attn[-256:]
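# Note on the slicing above: DETR uses a hidden size of 256, so each fused in_proj_weight has shape
# (3 * 256, 256); the row blocks [:256], [256:512] and [-256:] hold the query, key and value projection
# weights (with the matching bias slices), in that order.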
def UpperCamelCase ( ) ->Dict:
"""simple docstring"""
a_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
a_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
return im
@torch.no_grad()
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=False ) ->List[str]:
"""simple docstring"""
a_ , a_ = get_detr_config(UpperCAmelCase )
# load original model from torch hub
a_ = {
"detr-resnet-50": "detr_resnet50",
"detr-resnet-101": "detr_resnet101",
}
logger.info(F'''Converting model {model_name}...''' )
a_ = torch.hub.load("facebookresearch/detr" , model_name_to_original_name[model_name] , pretrained=UpperCAmelCase ).eval()
a_ = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(UpperCAmelCase ):
if is_panoptic:
a_ = "detr." + src
rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(UpperCAmelCase , is_panoptic=UpperCAmelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
a_ = "detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
a_ = state_dict.pop(UpperCAmelCase )
a_ = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
a_ = state_dict.pop(UpperCAmelCase )
a_ = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
a_ = state_dict.pop(UpperCAmelCase )
a_ = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
a_ = state_dict.pop(UpperCAmelCase )
a_ = val
# finally, create HuggingFace model and load state dict
a_ = DetrForSegmentation(UpperCAmelCase ) if is_panoptic else DetrForObjectDetection(UpperCAmelCase )
model.load_state_dict(UpperCAmelCase )
model.eval()
# verify our conversion on an image
a_ = "coco_panoptic" if is_panoptic else "coco_detection"
a_ = DetrImageProcessor(format=UpperCAmelCase )
a_ = processor(images=prepare_img() , return_tensors="pt" )
a_ = encoding["pixel_values"]
a_ = detr(UpperCAmelCase )
a_ = model(UpperCAmelCase )
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase )
model.save_pretrained(UpperCAmelCase )
processor.save_pretrained(UpperCAmelCase )
if push_to_hub:
# Upload model and image processor to the hub
logger.info("Uploading PyTorch model and image processor to the hub..." )
model.push_to_hub(F'''nielsr/{model_name}''' )
processor.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
UpperCamelCase_ = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
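# Example invocation (hypothetical script name and output path):
#   python convert_detr_checkpoint.py --model_name detr-resnet-50 --pytorch_dump_folder_path ./detr-resnet-50 --push_to_hub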
| 303
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase) ->None:
warnings.warn(
"The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use BeitImageProcessor instead." , __UpperCAmelCase , )
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase)
| 303
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def UpperCamelCase ( UpperCAmelCase ) ->Tuple:
"""simple docstring"""
a_ = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
a_ = True if "large" in model_name or "huge" in model_name else False
a_ = True if "large" in model_name or "huge" in model_name else False
a_ = True if "large" in model_name or "huge" in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
a_ = [3, 3, 3, 3]
a_ = [5, 5, 5, 5]
elif "fl4" in model_name:
a_ = [4, 4, 4, 4]
a_ = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
a_ = [3, 3, 3, 3]
if "lrf" in model_name:
a_ = [3, 3, 3, 3]
else:
a_ = [2, 2, 2, 2]
if "tiny" in model_name:
a_ = 96
elif "small" in model_name:
a_ = 96
elif "base" in model_name:
a_ = 128
elif "large" in model_name:
a_ = 192
elif "xlarge" in model_name:
a_ = 256
elif "huge" in model_name:
a_ = 352
# set label information
a_ = "huggingface/label-files"
if "large" in model_name or "huge" in model_name:
a_ = "imagenet-22k-id2label.json"
else:
a_ = "imagenet-1k-id2label.json"
a_ = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type="dataset" ) , "r" ) )
    a_ = {int(k): v for k, v in idalabel.items()}
a_ = {v: k for k, v in idalabel.items()}
a_ = FocalNetConfig(
        embed_dim=UpperCAmelCase , depths=UpperCAmelCase , focal_levels=UpperCAmelCase , focal_windows=UpperCAmelCase , use_conv_embed=UpperCAmelCase , id2label=UpperCAmelCase , label2id=UpperCAmelCase , use_post_layernorm=UpperCAmelCase , use_layerscale=UpperCAmelCase , )
return config
def UpperCamelCase ( UpperCAmelCase ) ->Any:
"""simple docstring"""
if "patch_embed.proj" in name:
a_ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
a_ = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
a_ = "encoder." + name
if "encoder.layers" in name:
a_ = name.replace("encoder.layers" , "encoder.stages" )
if "downsample.proj" in name:
a_ = name.replace("downsample.proj" , "downsample.projection" )
if "blocks" in name:
a_ = name.replace("blocks" , "layers" )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
a_ = name.replace("modulation.f" , "modulation.projection_in" )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
a_ = name.replace("modulation.h" , "modulation.projection_context" )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
a_ = name.replace("modulation.proj" , "modulation.projection_out" )
if name == "norm.weight":
a_ = "layernorm.weight"
if name == "norm.bias":
a_ = "layernorm.bias"
if "head" in name:
a_ = name.replace("head" , "classifier" )
else:
a_ = "focalnet." + name
return name
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) ->Dict:
"""simple docstring"""
a_ = {
"focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
}
# fmt: on
a_ = model_name_to_url[model_name]
print("Checkpoint URL: " , UpperCAmelCase )
a_ = torch.hub.load_state_dict_from_url(UpperCAmelCase , map_location="cpu" )["model"]
# rename keys
for key in state_dict.copy().keys():
a_ = state_dict.pop(UpperCAmelCase )
a_ = val
a_ = get_focalnet_config(UpperCAmelCase )
a_ = FocalNetForImageClassification(UpperCAmelCase )
model.eval()
# load state dict
model.load_state_dict(UpperCAmelCase )
# verify conversion
a_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
a_ = BitImageProcessor(
do_resize=UpperCAmelCase , size={"shortest_edge": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCAmelCase , crop_size=224 , do_normalize=UpperCAmelCase , image_mean=UpperCAmelCase , image_std=UpperCAmelCase , )
a_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
a_ = processor(images=UpperCAmelCase , return_tensors="pt" )
a_ = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
a_ = image_transforms(UpperCAmelCase ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , UpperCAmelCase , atol=1E-4 )
a_ = model(**UpperCAmelCase )
a_ = outputs.logits.argmax(-1 ).item()
    print("Predicted class:" , model.config.id2label[predicted_class_idx] )
print("First values of logits:" , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
a_ = torch.tensor([0.2166, -0.4368, 0.2191] )
elif model_name == "focalnet-tiny-lrf":
a_ = torch.tensor([1.1669, 0.0125, -0.1695] )
elif model_name == "focalnet-small":
a_ = torch.tensor([0.4917, -0.0430, 0.1341] )
elif model_name == "focalnet-small-lrf":
a_ = torch.tensor([-0.2588, -0.5342, -0.2331] )
elif model_name == "focalnet-base":
a_ = torch.tensor([-0.1655, -0.4090, -0.1730] )
elif model_name == "focalnet-base-lrf":
a_ = torch.tensor([0.5306, -0.0483, -0.3928] )
assert torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCAmelCase )
processor.save_pretrained(UpperCAmelCase )
if push_to_hub:
print(F'''Pushing model and processor of {model_name} to the hub...''' )
model.push_to_hub(F'''{model_name}''' )
processor.push_to_hub(F'''{model_name}''' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
UpperCamelCase_ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
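# Example invocation (hypothetical script name and output path):
#   python convert_focalnet_checkpoint.py --model_name focalnet-tiny --pytorch_dump_folder_path ./focalnet-tiny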
| 303
| 1
|
"""simple docstring"""
def nor_gate(input_1: int, input_2: int) -> int:
    """simple docstring"""
    return int(input_1 == input_2 == 0)
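# NOR is the negation of OR: the gate outputs 1 only when both of its inputs are 0, and 0 otherwise.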
def main() -> None:
"""simple docstring"""
print("Truth Table of NOR Gate:" )
print("| Input 1 | Input 2 | Output |" )
print(F'''| 0 | 0 | {nor_gate(0 , 0 )} |''' )
print(F'''| 0 | 1 | {nor_gate(0 , 1 )} |''' )
print(F'''| 1 | 0 | {nor_gate(1 , 0 )} |''' )
print(F'''| 1 | 1 | {nor_gate(1 , 1 )} |''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 303
|
"""simple docstring"""
import os
import numpy
import onnx
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->List[str]:
"""simple docstring"""
a_ = a.name
a_ = b.name
a_ = ""
a_ = ""
a_ = a == b
a_ = name_a
a_ = name_b
return res
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(UpperCAmelCase , UpperCAmelCase )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase )
_graph_replace_input_with(node_proto.attribute[1].g , UpperCAmelCase , UpperCAmelCase )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Dict:
"""simple docstring"""
for n in graph_proto.node:
_node_replace_input_with(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
a_ = list(model.graph.initializer )
a_ = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
a_ = inits[i].name
a_ = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , UpperCAmelCase , UpperCAmelCase )
def UpperCamelCase ( UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
a_ = os.path.dirname(UpperCAmelCase )
a_ = os.path.basename(UpperCAmelCase )
a_ = onnx.load(os.path.join(UpperCAmelCase , UpperCAmelCase ) )
a_ = list(model.graph.initializer )
a_ = set()
a_ = {}
a_ = []
a_ = 0
for i in range(len(UpperCAmelCase ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(UpperCAmelCase ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(UpperCAmelCase )
dup_set.add(UpperCAmelCase )
a_ = inits[j].data_type
a_ = numpy.prod(inits[j].dims )
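                # ONNX TensorProto data types: 1 = FLOAT and 6 = INT32 (4 bytes each); 7 = INT64 and 11 = DOUBLE (8 bytes each)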
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print("unexpected data type: " , UpperCAmelCase )
total_reduced_size += mem_size
a_ = inits[i].name
a_ = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(UpperCAmelCase )
else:
a_ = [name_j]
ind_to_replace.append((j, i) )
print("total reduced size: " , total_reduced_size / 1_024 / 1_024 / 1_024 , "GB" )
a_ = sorted(UpperCAmelCase )
_remove_dup_initializers_from_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
a_ = "optimized_" + model_file_name
a_ = os.path.join(UpperCAmelCase , UpperCAmelCase )
onnx.save(UpperCAmelCase , UpperCAmelCase )
return new_model
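# Minimal usage sketch (hypothetical file path), assuming the entry point above keeps its original name
# remove_dup_initializers:
#
#   optimized_model_path = remove_dup_initializers("onnx_export/encoder.onnx")
#   # The deduplicated graph is written next to the input file as "optimized_encoder.onnx".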
| 303
| 1
|
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
UpperCamelCase_ = 16
UpperCamelCase_ = 32
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase = 16 , UpperCAmelCase = "bert-base-cased" ) ->Optional[Any]:
"""simple docstring"""
a_ = AutoTokenizer.from_pretrained(UpperCAmelCase )
a_ = load_dataset("glue" , "mrpc" )
def tokenize_function(UpperCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
a_ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=UpperCAmelCase , max_length=UpperCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
a_ = datasets.map(
UpperCAmelCase , batched=UpperCAmelCase , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=UpperCAmelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
a_ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(UpperCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(UpperCAmelCase , padding="max_length" , max_length=128 , return_tensors="pt" )
return tokenizer.pad(UpperCAmelCase , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
a_ = DataLoader(
tokenized_datasets["train"] , shuffle=UpperCAmelCase , collate_fn=UpperCAmelCase , batch_size=UpperCAmelCase )
a_ = DataLoader(
tokenized_datasets["validation"] , shuffle=UpperCAmelCase , collate_fn=UpperCAmelCase , batch_size=UpperCAmelCase )
return train_dataloader, eval_dataloader
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->str:
"""simple docstring"""
model.eval()
a_ = 0
for step, batch in enumerate(UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
a_ = model(**UpperCAmelCase )
a_ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
a_ , a_ = accelerator.gather(
(predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(UpperCAmelCase ) - 1:
a_ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
a_ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=UpperCAmelCase , references=UpperCAmelCase , )
a_ = metric.compute()
return eval_metric["accuracy"]
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->Dict:
"""simple docstring"""
a_ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
a_ = config["lr"]
a_ = int(config["num_epochs"] )
a_ = int(config["seed"] )
a_ = int(config["batch_size"] )
a_ = args.model_name_or_path
set_seed(UpperCAmelCase )
a_ , a_ = get_dataloaders(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
a_ = AutoModelForSequenceClassification.from_pretrained(UpperCAmelCase , return_dict=UpperCAmelCase )
# Instantiate optimizer
a_ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
a_ = optimizer_cls(params=model.parameters() , lr=UpperCAmelCase )
if accelerator.state.deepspeed_plugin is not None:
a_ = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
a_ = 1
a_ = (len(UpperCAmelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
a_ = get_linear_schedule_with_warmup(
optimizer=UpperCAmelCase , num_warmup_steps=0 , num_training_steps=UpperCAmelCase , )
else:
a_ = DummyScheduler(UpperCAmelCase , total_num_steps=UpperCAmelCase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
a_ , a_ , a_ , a_ , a_ = accelerator.prepare(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# We need to keep track of how many total steps we have iterated over
a_ = 0
# We also need to keep track of the stating epoch so files are named properly
a_ = 0
a_ = evaluate.load("glue" , "mrpc" )
a_ = num_epochs
if args.partial_train_epoch is not None:
a_ = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
a_ = args.resume_from_checkpoint.split("epoch_" )[1]
a_ = ""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
a_ = int(UpperCAmelCase ) + 1
a_ = evaluation_loop(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
accelerator.print("resumed checkpoint performance:" , UpperCAmelCase )
accelerator.print("resumed checkpoint's scheduler's lr:" , lr_scheduler.get_lr()[0] )
        accelerator.print("resumed optimizer's lr:" , optimizer.param_groups[0]["lr"] )
with open(os.path.join(args.output_dir , F'''state_{starting_epoch-1}.json''' ) , "r" ) as f:
a_ = json.load(UpperCAmelCase )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
a_ = {}
for epoch in range(UpperCAmelCase , UpperCAmelCase ):
model.train()
for step, batch in enumerate(UpperCAmelCase ):
a_ = model(**UpperCAmelCase )
a_ = outputs.loss
a_ = loss / gradient_accumulation_steps
accelerator.backward(UpperCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
a_ = F'''epoch_{epoch}'''
a_ = os.path.join(args.output_dir , UpperCAmelCase )
accelerator.save_state(UpperCAmelCase )
a_ = evaluation_loop(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
a_ = accuracy
a_ = lr_scheduler.get_lr()[0]
a_ = optimizer.param_groups[0]["lr"]
a_ = epoch
a_ = overall_step
accelerator.print(F'''epoch {epoch}:''' , UpperCAmelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F'''state_{epoch}.json''' ) , "w" ) as f:
json.dump(UpperCAmelCase , UpperCAmelCase )
def UpperCamelCase ( ) ->str:
"""simple docstring"""
a_ = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path" , type=UpperCAmelCase , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=UpperCAmelCase , )
parser.add_argument(
"--output_dir" , type=UpperCAmelCase , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--resume_from_checkpoint" , type=UpperCAmelCase , default=UpperCAmelCase , help="If the training should continue from a checkpoint folder." , )
parser.add_argument(
"--partial_train_epoch" , type=UpperCAmelCase , default=UpperCAmelCase , help="If passed, the training will stop after this number of epochs." , )
parser.add_argument(
"--num_epochs" , type=UpperCAmelCase , default=2 , help="Number of train epochs." , )
a_ = parser.parse_args()
a_ = {"lr": 2E-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(UpperCAmelCase , UpperCAmelCase )
if __name__ == "__main__":
main()
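# Example invocation (hypothetical script name and output directory):
#   accelerate launch checkpointing_test_script.py --model_name_or_path bert-base-cased --output_dir /tmp/mrpc_checkpoints --num_epochs 2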
| 303
|
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , __UpperCAmelCase = "▁" , __UpperCAmelCase = True , __UpperCAmelCase = "<unk>" , __UpperCAmelCase = "</s>" , __UpperCAmelCase = "<pad>" , ) ->str:
a_ = {
"pad": {"id": 0, "token": pad_token},
"eos": {"id": 1, "token": eos_token},
"unk": {"id": 2, "token": unk_token},
}
a_ = [None] * len(self.special_tokens)
for token_dict in self.special_tokens.values():
a_ = token_dict["token"]
a_ = Tokenizer(Unigram())
a_ = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(" {2,}") , " "),
normalizers.Lowercase(),
])
a_ = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase),
pre_tokenizers.Digits(individual_digits=__UpperCAmelCase),
pre_tokenizers.Punctuation(),
])
a_ = decoders.Metaspace(replacement=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase)
a_ = TemplateProcessing(
single=F'''$A {self.special_tokens["eos"]["token"]}''' , special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])] , )
a_ = {
"model": "SentencePieceUnigram",
"replacement": replacement,
"add_prefix_space": add_prefix_space,
}
super().__init__(__UpperCAmelCase , __UpperCAmelCase)
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = 80_00 , __UpperCAmelCase = True , ) ->Optional[Any]:
a_ = trainers.UnigramTrainer(
vocab_size=__UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=__UpperCAmelCase , )
if isinstance(__UpperCAmelCase , __UpperCAmelCase):
a_ = [files]
self._tokenizer.train(__UpperCAmelCase , trainer=__UpperCAmelCase)
self.add_unk_id()
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = 80_00 , __UpperCAmelCase = True , ) ->int:
a_ = trainers.UnigramTrainer(
vocab_size=__UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=__UpperCAmelCase , )
self._tokenizer.train_from_iterator(__UpperCAmelCase , trainer=__UpperCAmelCase)
self.add_unk_id()
def UpperCAmelCase__ ( self) ->Union[str, Any]:
a_ = json.loads(self._tokenizer.to_str())
a_ = self.special_tokens["unk"]["id"]
a_ = Tokenizer.from_str(json.dumps(__UpperCAmelCase))
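# Minimal usage sketch (hypothetical corpus path), assuming the class above keeps its original name
# SentencePieceUnigramTokenizer and exposes the two training methods as train and train_from_iterator:
#
#   tokenizer = SentencePieceUnigramTokenizer()
#   tokenizer.train(["corpus.txt"], vocab_size=8000)
#   ids = tokenizer.encode("A sample sentence.").ids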
| 303
| 1
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=3 , __UpperCAmelCase=32 , __UpperCAmelCase=3 , __UpperCAmelCase=10 , __UpperCAmelCase=[10, 20, 30, 40] , __UpperCAmelCase=[1, 1, 2, 1] , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase="relu" , __UpperCAmelCase=3 , __UpperCAmelCase=None , ) ->str:
a_ = parent
a_ = batch_size
a_ = image_size
a_ = num_channels
a_ = embeddings_size
a_ = hidden_sizes
a_ = depths
a_ = is_training
a_ = use_labels
a_ = hidden_act
a_ = num_labels
a_ = scope
a_ = len(__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[str]:
a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.num_labels)
a_ = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self) ->str:
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->List[Any]:
a_ = TFResNetModel(config=__UpperCAmelCase)
a_ = model(__UpperCAmelCase)
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->str:
a_ = self.num_labels
a_ = TFResNetForImageClassification(__UpperCAmelCase)
a_ = model(__UpperCAmelCase , labels=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase__ ( self) ->Union[str, Any]:
a_ = self.prepare_config_and_inputs()
a_ , a_ , a_ = config_and_inputs
a_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : int = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
a_ : Union[str, Any] = (
{"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification}
if is_tf_available()
else {}
)
a_ : Optional[int] = False
a_ : str = False
a_ : Any = False
a_ : List[Any] = False
a_ : Any = False
def UpperCAmelCase__ ( self) ->str:
a_ = TFResNetModelTester(self)
a_ = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase__ ( self) ->List[Any]:
return
@unittest.skip(reason="ResNet does not use inputs_embeds")
def UpperCAmelCase__ ( self) ->int:
pass
@unittest.skip(reason="ResNet does not support input and output embeddings")
def UpperCAmelCase__ ( self) ->Tuple:
pass
def UpperCAmelCase__ ( self) ->Union[str, Any]:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(__UpperCAmelCase)
a_ = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ = [*signature.parameters.keys()]
a_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Dict:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Optional[int]:
def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase):
a_ = model_class(__UpperCAmelCase)
a_ = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase))
a_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
a_ = self.model_tester.num_stages
self.assertEqual(len(__UpperCAmelCase) , expected_num_stages + 1)
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
a_ = layer_type
a_ = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a_ = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->int:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase)
@slow
def UpperCAmelCase__ ( self) ->Dict:
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = TFResNetModel.from_pretrained(__UpperCAmelCase)
self.assertIsNotNone(__UpperCAmelCase)
def UpperCamelCase ( ) ->List[Any]:
"""simple docstring"""
a_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class snake_case ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self) ->List[Any]:
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
if is_vision_available()
else None
)
@slow
def UpperCAmelCase__ ( self) ->Union[str, Any]:
a_ = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
a_ = self.default_image_processor
a_ = prepare_img()
a_ = image_processor(images=__UpperCAmelCase , return_tensors="tf")
# forward pass
a_ = model(**__UpperCAmelCase)
# verify the logits
a_ = tf.TensorShape((1, 10_00))
self.assertEqual(outputs.logits.shape , __UpperCAmelCase)
a_ = tf.constant([-11.1_069, -9.7_877, -8.3_777])
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __UpperCAmelCase , atol=1E-4))
| 303
|
"""simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
a_ = hf_hub_url(repo_id=UpperCAmelCase , path=UpperCAmelCase , revision=UpperCAmelCase )
assert url == F'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(UpperCAmelCase )}'''
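# For example, hf_hub_url(repo_id="org-name/dataset-name", path="filename.csv", revision="v2") resolves to
# https://huggingface.co/datasets/org-name/dataset-name/resolve/v2/filename.csv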
| 303
| 1
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=1_28 , __UpperCAmelCase=32 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ) ->int:
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_mask
a_ = use_token_type_ids
a_ = use_labels
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = type_sequence_label_size
a_ = initializer_range
a_ = num_labels
a_ = num_choices
a_ = scope
def UpperCAmelCase__ ( self) ->List[str]:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = None
if self.use_input_mask:
a_ = random_attention_mask([self.batch_size, self.seq_length])
a_ = None
if self.use_token_type_ids:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
a_ = None
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a_ = ids_tensor([self.batch_size] , self.num_choices)
a_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self) ->Optional[Any]:
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self) ->Dict:
        a_ , a_ , a_ , a_ , a_ , a_ , a_ = self.prepare_config_and_inputs()
a_ = True
a_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
a_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->List[str]:
a_ = NezhaModel(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase)
a_ = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase)
a_ = model(__UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->str:
a_ = True
a_ = NezhaModel(__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , )
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , )
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->str:
a_ = NezhaForMaskedLM(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->List[Any]:
a_ = NezhaForNextSentencePrediction(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->str:
a_ = NezhaForPreTraining(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , next_sentence_label=__UpperCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Optional[Any]:
a_ = NezhaForQuestionAnswering(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->int:
a_ = self.num_labels
a_ = NezhaForSequenceClassification(__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Optional[Any]:
a_ = self.num_labels
a_ = NezhaForTokenClassification(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Optional[int]:
a_ = self.num_choices
a_ = NezhaForMultipleChoice(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def UpperCAmelCase__ ( self) ->Optional[Any]:
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : int = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
a_ : str = (
{
"""feature-extraction""": NezhaModel,
"""fill-mask""": NezhaForMaskedLM,
"""question-answering""": NezhaForQuestionAnswering,
"""text-classification""": NezhaForSequenceClassification,
"""token-classification""": NezhaForTokenClassification,
"""zero-shot""": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ : Optional[Any] = True
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False) ->Optional[int]:
a_ = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase)
if return_labels:
if model_class in get_values(__UpperCAmelCase):
a_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__UpperCAmelCase)
a_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase)
return inputs_dict
def UpperCAmelCase__ ( self) ->int:
a_ = NezhaModelTester(self)
a_ = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37)
def UpperCAmelCase__ ( self) ->List[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self) ->Optional[Any]:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->int:
a_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Dict:
# This regression test was failing with PyTorch < 1.3
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
) = self.model_tester.prepare_config_and_inputs_for_decoder()
input_mask = None
self.model_tester.create_and_check_model_as_decoder(
config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , )
def UpperCAmelCase__ ( self) ->int:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Tuple:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Optional[int]:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Tuple:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Dict:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Any:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Tuple:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase)
@slow
def UpperCAmelCase__ ( self) ->int:
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = NezhaModel.from_pretrained(__UpperCAmelCase)
self.assertIsNotNone(__UpperCAmelCase)
@slow
@require_torch_gpu
def UpperCAmelCase__ ( self) ->int:
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
a_ = True
a_ = model_class(config=__UpperCAmelCase)
a_ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase)
a_ = torch.jit.trace(
__UpperCAmelCase , (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")))
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(__UpperCAmelCase , os.path.join(__UpperCAmelCase , "bert.pt"))
a_ = torch.jit.load(os.path.join(__UpperCAmelCase , "bert.pt") , map_location=__UpperCAmelCase)
loaded(inputs_dict["input_ids"].to(__UpperCAmelCase) , inputs_dict["attention_mask"].to(__UpperCAmelCase))
@require_torch
class snake_case ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self) ->Any:
a_ = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
a_ = torch.tensor([[0, 1, 2, 3, 4, 5]])
a_ = torch.tensor([[0, 1, 1, 1, 1, 1]])
with torch.no_grad():
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase)[0]
a_ = torch.Size((1, 6, 7_68))
self.assertEqual(output.shape , __UpperCAmelCase)
a_ = torch.tensor([[[0.0_685, 0.2_441, 0.1_102], [0.0_600, 0.1_906, 0.1_349], [0.0_221, 0.0_819, 0.0_586]]])
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __UpperCAmelCase , atol=1E-4))
@slow
def UpperCAmelCase__ ( self) ->Optional[int]:
a_ = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
a_ = torch.tensor([[0, 1, 2, 3, 4, 5]])
a_ = torch.tensor([[1, 1, 1, 1, 1, 1]])
with torch.no_grad():
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase)[0]
a_ = torch.Size((1, 6, 2_11_28))
self.assertEqual(output.shape , __UpperCAmelCase)
a_ = torch.tensor(
[[-2.7_939, -1.7_902, -2.2_189], [-2.8_585, -1.8_908, -2.3_723], [-2.6_499, -1.7_750, -2.2_558]])
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __UpperCAmelCase , atol=1E-4))
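# --- Editor's usage sketch (not part of the original test file). ---
# Minimal inference run mirroring the slow integration test above: the public
# "sijunhe/nezha-cn-base" checkpoint on a toy input, checking the logits shape.
import torch
from transformers import NezhaForMaskedLM

nezha_mlm = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
nezha_mlm.eval()
toy_input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])        # same toy ids as the slow test
toy_attention_mask = torch.ones_like(toy_input_ids)
with torch.no_grad():
    toy_logits = nezha_mlm(toy_input_ids, attention_mask=toy_attention_mask).logits
print(toy_logits.shape)                                    # torch.Size([1, 6, 21128]) per the test above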
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : Tuple = """audio-spectrogram-transformer"""
def __init__( self , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=16 , __UpperCAmelCase=True , __UpperCAmelCase=10 , __UpperCAmelCase=10 , __UpperCAmelCase=10_24 , __UpperCAmelCase=1_28 , **__UpperCAmelCase , ) ->str:
super().__init__(**__UpperCAmelCase)
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = initializer_range
a_ = layer_norm_eps
a_ = patch_size
a_ = qkv_bias
a_ = frequency_stride
a_ = time_stride
a_ = max_length
a_ = num_mel_bins
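# --- Editor's usage sketch (not part of the original file). ---
# Assuming the class above corresponds to transformers' ASTConfig (the registered
# "audio-spectrogram-transformer" model type), a config can be built and inspected like this.
from transformers import ASTConfig

ast_config = ASTConfig()                                     # defaults mirror the signature above
print(ast_config.hidden_size)                                # 768
print(ast_config.max_length, ast_config.num_mel_bins)        # 1024 spectrogram frames, 128 mel bins
print(ast_config.frequency_stride, ast_config.time_stride)   # 10, 10 patch strides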
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : Tuple = (IPNDMScheduler,)
a_ : List[str] = (("""num_inference_steps""", 50),)
def UpperCAmelCase__ ( self , **__UpperCAmelCase) ->List[Any]:
a_ = {"num_train_timesteps": 10_00}
config.update(**__UpperCAmelCase)
return config
def UpperCAmelCase__ ( self , __UpperCAmelCase=0 , **__UpperCAmelCase) ->Dict:
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , __UpperCAmelCase)
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config(**__UpperCAmelCase)
a_ = scheduler_class(**__UpperCAmelCase)
scheduler.set_timesteps(__UpperCAmelCase)
# copy over dummy past residuals
a_ = dummy_past_residuals[:]
if time_step is None:
a_ = scheduler.timesteps[len(scheduler.timesteps) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCAmelCase)
a_ = scheduler_class.from_pretrained(__UpperCAmelCase)
new_scheduler.set_timesteps(__UpperCAmelCase)
# copy over dummy past residuals
a_ = dummy_past_residuals[:]
a_ = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
a_ = new_scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
a_ = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
a_ = new_scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self) ->List[str]:
pass
def UpperCAmelCase__ ( self , __UpperCAmelCase=0 , **__UpperCAmelCase) ->List[str]:
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , __UpperCAmelCase)
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config()
a_ = scheduler_class(**__UpperCAmelCase)
scheduler.set_timesteps(__UpperCAmelCase)
# copy over dummy past residuals (must be after setting timesteps)
a_ = dummy_past_residuals[:]
if time_step is None:
a_ = scheduler.timesteps[len(scheduler.timesteps) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCAmelCase)
a_ = scheduler_class.from_pretrained(__UpperCAmelCase)
# copy over dummy past residuals
new_scheduler.set_timesteps(__UpperCAmelCase)
# copy over dummy past residual (must be after setting timesteps)
a_ = dummy_past_residuals[:]
a_ = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
a_ = new_scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
a_ = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
a_ = new_scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self , **__UpperCAmelCase) ->Any:
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config(**__UpperCAmelCase)
a_ = scheduler_class(**__UpperCAmelCase)
a_ = 10
a_ = self.dummy_model()
a_ = self.dummy_sample_deter
scheduler.set_timesteps(__UpperCAmelCase)
for i, t in enumerate(scheduler.timesteps):
a_ = model(__UpperCAmelCase , __UpperCAmelCase)
a_ = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase).prev_sample
for i, t in enumerate(scheduler.timesteps):
a_ = model(__UpperCAmelCase , __UpperCAmelCase)
a_ = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase).prev_sample
return sample
def UpperCAmelCase__ ( self) ->Union[str, Any]:
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , __UpperCAmelCase)
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config()
a_ = scheduler_class(**__UpperCAmelCase)
a_ = self.dummy_sample
a_ = 0.1 * sample
if num_inference_steps is not None and hasattr(__UpperCAmelCase , "set_timesteps"):
scheduler.set_timesteps(__UpperCAmelCase)
elif num_inference_steps is not None and not hasattr(__UpperCAmelCase , "set_timesteps"):
a_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
a_ = dummy_past_residuals[:]
a_ = scheduler.timesteps[5]
a_ = scheduler.timesteps[6]
a_ = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
a_ = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
a_ = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
a_ = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def UpperCAmelCase__ ( self) ->Union[str, Any]:
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__UpperCAmelCase , time_step=__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->int:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00]):
self.check_over_forward(num_inference_steps=__UpperCAmelCase , time_step=__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Dict:
a_ = self.full_loop()
a_ = torch.mean(torch.abs(__UpperCAmelCase))
assert abs(result_mean.item() - 2_54_05_29) < 10
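# --- Editor's usage sketch (not part of the original test file). ---
# Minimal denoising loop with diffusers' IPNDMScheduler, following the same
# set_timesteps / step pattern as full_loop above; the zero-predicting model is a stand-in.
import torch
from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8)               # toy sample

def toy_model(sample, t):                      # hypothetical noise predictor
    return torch.zeros_like(sample)

for t in scheduler.timesteps:
    residual = toy_model(sample, t)
    sample = scheduler.step(residual, t, sample).prev_sample
print(sample.shape)                            # shape is preserved: torch.Size([1, 3, 8, 8])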
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
),
}
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : str = """xlm-roberta"""
def __init__( self , __UpperCAmelCase=3_05_22 , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase="absolute" , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , ) ->Union[str, Any]:
super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase)
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = hidden_act
a_ = intermediate_size
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = initializer_range
a_ = layer_norm_eps
a_ = position_embedding_type
a_ = use_cache
a_ = classifier_dropout
class snake_case ( SCREAMING_SNAKE_CASE_ ):
@property
def UpperCAmelCase__ ( self) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
a_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
a_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
])
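# --- Editor's usage sketch (not part of the original file). ---
# Assuming the two classes above correspond to transformers' XLMRobertaConfig and
# XLMRobertaOnnxConfig, the dynamic ONNX axes can be inspected like this.
from transformers import XLMRobertaConfig
from transformers.models.xlm_roberta.configuration_xlm_roberta import XLMRobertaOnnxConfig

xlmr_config = XLMRobertaConfig()               # defaults mirror the signature above
xlmr_onnx_config = XLMRobertaOnnxConfig(xlmr_config, task="default")
print(xlmr_onnx_config.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#              ('attention_mask', {0: 'batch', 1: 'sequence'})])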
"""simple docstring"""
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = [
['attention', 'attn'],
['encoder_attention', 'encoder_attn'],
['q_lin', 'q_proj'],
['k_lin', 'k_proj'],
['v_lin', 'v_proj'],
['out_lin', 'out_proj'],
['norm_embeddings', 'layernorm_embedding'],
['position_embeddings', 'embed_positions'],
['embeddings', 'embed_tokens'],
['ffn.lin', 'fc'],
]
def UpperCamelCase ( UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
k = k.replace(parlai_name , hf_name )
if k.startswith("encoder" ):
k = k.replace(".attn" , ".self_attn" )
k = k.replace("norm1" , "self_attn_layer_norm" )
k = k.replace("norm2" , "final_layer_norm" )
elif k.startswith("decoder" ):
k = k.replace("norm1" , "self_attn_layer_norm" )
k = k.replace("norm2" , "encoder_attn_layer_norm" )
k = k.replace("norm3" , "final_layer_norm" )
return k
def UpperCamelCase ( UpperCAmelCase ) ->Tuple:
"""simple docstring"""
keys = [
"model.encoder.layernorm_embedding.weight",
"model.encoder.layernorm_embedding.bias",
"model.decoder.layernorm_embedding.weight",
"model.decoder.layernorm_embedding.bias",
]
for k in keys:
v = sd.pop(k )
new_k = k.replace("layernorm_embedding" , "layer_norm" )
assert new_k not in sd
sd[new_k] = v
UpperCamelCase_ = ['START']
@torch.no_grad()
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
a_ = torch.load(UpperCAmelCase , map_location="cpu" )
a_ = model["model"]
a_ = BlenderbotConfig.from_json_file(UpperCAmelCase )
a_ = BlenderbotForConditionalGeneration(UpperCAmelCase )
a_ = m.model.state_dict().keys()
a_ = []
a_ = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
a_ = rename_state_dict_key(UpperCAmelCase )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
a_ = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(UpperCAmelCase )
m.model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase )
m.half()
m.save_pretrained(UpperCAmelCase )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
parser.add_argument(
'--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
)
UpperCamelCase_ = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
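# --- Editor's illustration (not part of the original script). ---
# Standalone sketch of the intended key renaming: the conversion above maps ParlAI-style
# parameter names onto Hugging Face Blenderbot names via the same PATTERNS table.
BLENDERBOT_PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]

def rename_parlai_key(k: str) -> str:
    for parlai_name, hf_name in BLENDERBOT_PATTERNS:
        k = k.replace(parlai_name, hf_name)
    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k

print(rename_parlai_key("encoder.layers.0.attention.q_lin.weight"))
# -> encoder.layers.0.self_attn.q_proj.weight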
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=24 , __UpperCAmelCase=2 , __UpperCAmelCase=6 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=None , __UpperCAmelCase=10_00 , ) ->List[str]:
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_mask
a_ = use_token_type_ids
a_ = use_labels
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = type_sequence_label_size
a_ = initializer_range
a_ = num_labels
a_ = scope
a_ = range_bbox
def UpperCAmelCase__ ( self) ->int:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox)
# Ensure that bbox is legal
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
a_ = bbox[i, j, 3]
a_ = bbox[i, j, 1]
a_ = t
if bbox[i, j, 2] < bbox[i, j, 0]:
a_ = bbox[i, j, 2]
a_ = bbox[i, j, 0]
a_ = t
a_ = None
if self.use_input_mask:
a_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
a_ = None
if self.use_token_type_ids:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a_ = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCAmelCase__ ( self) ->List[str]:
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Any:
a_ = LiltModel(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase)
a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase , token_type_ids=__UpperCAmelCase)
a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Union[str, Any]:
a_ = self.num_labels
a_ = LiltForTokenClassification(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(
__UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Dict:
a_ = LiltForQuestionAnswering(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(
__UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase__ ( self) ->str:
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
bbox,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"bbox": bbox,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : List[Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
a_ : List[str] = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ : Any = False
a_ : Dict = False
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->int:
return True
def UpperCAmelCase__ ( self) ->str:
a_ = LiltModelTester(self)
a_ = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37)
def UpperCAmelCase__ ( self) ->List[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self) ->Tuple:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Dict:
a_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a_ = type
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[str]:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->str:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase)
@slow
def UpperCAmelCase__ ( self) ->List[Any]:
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = LiltModel.from_pretrained(__UpperCAmelCase)
self.assertIsNotNone(__UpperCAmelCase)
@require_torch
@slow
class snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(__UpperCAmelCase)
a_ = torch.tensor([[1, 2]] , device=__UpperCAmelCase)
a_ = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__UpperCAmelCase)
# forward pass
with torch.no_grad():
a_ = model(input_ids=__UpperCAmelCase , bbox=__UpperCAmelCase)
a_ = torch.Size([1, 2, 7_68])
a_ = torch.tensor(
[[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]] , device=__UpperCAmelCase , )
self.assertTrue(outputs.last_hidden_state.shape , __UpperCAmelCase)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __UpperCAmelCase , atol=1E-3))
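# --- Editor's usage sketch (not part of the original test file). ---
# LiLT takes one 4-vector bounding box per token on a 0-1000 grid, with x0 <= x1 and
# y0 <= y1 (the "legal bbox" constraint enforced by the tester above). The checkpoint
# id comes from the slow test.
import torch
from transformers import LiltModel

lilt = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
lilt.eval()
toy_input_ids = torch.tensor([[1, 2]])                     # toy token ids, as in the slow test
toy_bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]])    # (batch, seq_len, 4): x0, y0, x1, y1
with torch.no_grad():
    lilt_outputs = lilt(input_ids=toy_input_ids, bbox=toy_bbox)
print(lilt_outputs.last_hidden_state.shape)                # torch.Size([1, 2, 768])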
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def UpperCamelCase ( UpperCAmelCase ) ->Tuple:
"""simple docstring"""
a_ = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
a_ = True if "large" in model_name or "huge" in model_name else False
a_ = True if "large" in model_name or "huge" in model_name else False
a_ = True if "large" in model_name or "huge" in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
a_ = [3, 3, 3, 3]
a_ = [5, 5, 5, 5]
elif "fl4" in model_name:
a_ = [4, 4, 4, 4]
a_ = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
a_ = [3, 3, 3, 3]
if "lrf" in model_name:
a_ = [3, 3, 3, 3]
else:
a_ = [2, 2, 2, 2]
if "tiny" in model_name:
a_ = 96
elif "small" in model_name:
a_ = 96
elif "base" in model_name:
a_ = 128
elif "large" in model_name:
a_ = 192
elif "xlarge" in model_name:
a_ = 256
elif "huge" in model_name:
a_ = 352
# set label information
a_ = "huggingface/label-files"
if "large" in model_name or "huge" in model_name:
a_ = "imagenet-22k-id2label.json"
else:
a_ = "imagenet-1k-id2label.json"
idalabel = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type="dataset" ) , "r" ) )
idalabel = {int(k ): v for k, v in idalabel.items()}
labelaid = {v: k for k, v in idalabel.items()}
a_ = FocalNetConfig(
embed_dim=UpperCAmelCase , depths=UpperCAmelCase , focal_levels=UpperCAmelCase , focal_windows=UpperCAmelCase , use_conv_embed=UpperCAmelCase , idalabel=UpperCAmelCase , labelaid=UpperCAmelCase , use_post_layernorm=UpperCAmelCase , use_layerscale=UpperCAmelCase , )
return config
def UpperCamelCase ( UpperCAmelCase ) ->Any:
"""simple docstring"""
if "patch_embed.proj" in name:
a_ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
a_ = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
a_ = "encoder." + name
if "encoder.layers" in name:
a_ = name.replace("encoder.layers" , "encoder.stages" )
if "downsample.proj" in name:
a_ = name.replace("downsample.proj" , "downsample.projection" )
if "blocks" in name:
a_ = name.replace("blocks" , "layers" )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
a_ = name.replace("modulation.f" , "modulation.projection_in" )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
a_ = name.replace("modulation.h" , "modulation.projection_context" )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
a_ = name.replace("modulation.proj" , "modulation.projection_out" )
if name == "norm.weight":
a_ = "layernorm.weight"
if name == "norm.bias":
a_ = "layernorm.bias"
if "head" in name:
a_ = name.replace("head" , "classifier" )
else:
a_ = "focalnet." + name
return name
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) ->Dict:
"""simple docstring"""
model_name_to_url = {
"focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
}
# fmt: on
a_ = model_name_to_url[model_name]
print("Checkpoint URL: " , UpperCAmelCase )
a_ = torch.hub.load_state_dict_from_url(UpperCAmelCase , map_location="cpu" )["model"]
# rename keys
for key in state_dict.copy().keys():
a_ = state_dict.pop(UpperCAmelCase )
a_ = val
a_ = get_focalnet_config(UpperCAmelCase )
a_ = FocalNetForImageClassification(UpperCAmelCase )
model.eval()
# load state dict
model.load_state_dict(UpperCAmelCase )
# verify conversion
a_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
a_ = BitImageProcessor(
do_resize=UpperCAmelCase , size={"shortest_edge": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCAmelCase , crop_size=224 , do_normalize=UpperCAmelCase , image_mean=UpperCAmelCase , image_std=UpperCAmelCase , )
a_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
a_ = processor(images=UpperCAmelCase , return_tensors="pt" )
image_transforms = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
a_ = image_transforms(UpperCAmelCase ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , UpperCAmelCase , atol=1E-4 )
a_ = model(**UpperCAmelCase )
a_ = outputs.logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
print("First values of logits:" , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
a_ = torch.tensor([0.2166, -0.4368, 0.2191] )
elif model_name == "focalnet-tiny-lrf":
a_ = torch.tensor([1.1669, 0.0125, -0.1695] )
elif model_name == "focalnet-small":
a_ = torch.tensor([0.4917, -0.0430, 0.1341] )
elif model_name == "focalnet-small-lrf":
a_ = torch.tensor([-0.2588, -0.5342, -0.2331] )
elif model_name == "focalnet-base":
a_ = torch.tensor([-0.1655, -0.4090, -0.1730] )
elif model_name == "focalnet-base-lrf":
a_ = torch.tensor([0.5306, -0.0483, -0.3928] )
assert torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCAmelCase )
processor.save_pretrained(UpperCAmelCase )
if push_to_hub:
print(F'''Pushing model and processor of {model_name} to the hub...''' )
model.push_to_hub(F'''{model_name}''' )
processor.push_to_hub(F'''{model_name}''' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
UpperCamelCase_ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
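# --- Editor's usage sketch (not part of the original script). ---
# Classifying an image with a converted checkpoint. The hub id "microsoft/focalnet-tiny"
# is an assumption; any folder written by the conversion script above works the same way.
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, FocalNetForImageClassification

focalnet_processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny")
focalnet = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny")
focalnet.eval()

img_url = "http://images.cocodataset.org/val2017/000000039769.jpg"   # same test image as above
img = Image.open(requests.get(img_url, stream=True).raw)
focalnet_inputs = focalnet_processor(images=img, return_tensors="pt")
with torch.no_grad():
    focalnet_logits = focalnet(**focalnet_inputs).logits
print(focalnet.config.id2label[focalnet_logits.argmax(-1).item()])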
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def UpperCAmelCase__ ( self) ->Any:
a_ = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(__UpperCAmelCase , "embed_dim"))
self.parent.assertTrue(hasattr(__UpperCAmelCase , "num_heads"))
class snake_case :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=64 , __UpperCAmelCase=3 , __UpperCAmelCase=[16, 48, 96] , __UpperCAmelCase=[1, 3, 6] , __UpperCAmelCase=[1, 2, 10] , __UpperCAmelCase=[7, 3, 3] , __UpperCAmelCase=[4, 2, 2] , __UpperCAmelCase=[2, 1, 1] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[False, False, True] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=2 , ) ->Optional[int]:
a_ = parent
a_ = batch_size
a_ = image_size
a_ = patch_sizes
a_ = patch_stride
a_ = patch_padding
a_ = is_training
a_ = use_labels
a_ = num_labels
a_ = num_channels
a_ = embed_dim
a_ = num_heads
a_ = stride_kv
a_ = depth
a_ = cls_token
a_ = attention_drop_rate
a_ = initializer_range
a_ = layer_norm_eps
def UpperCAmelCase__ ( self) ->Any:
a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a_ = None
if self.use_labels:
# create a random int32 tensor of given shape
a_ = ids_tensor([self.batch_size] , self.num_labels)
a_ = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self) ->Union[str, Any]:
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Optional[Any]:
a_ = TFCvtModel(config=__UpperCAmelCase)
a_ = model(__UpperCAmelCase , training=__UpperCAmelCase)
a_ = (self.image_size, self.image_size)
height , width = image_size[0], image_size[1]
for i in range(len(self.depth)):
a_ = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
a_ = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->str:
a_ = self.num_labels
a_ = TFCvtForImageClassification(__UpperCAmelCase)
a_ = model(__UpperCAmelCase , labels=__UpperCAmelCase , training=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase__ ( self) ->Tuple:
config_and_inputs = self.prepare_config_and_inputs()
config , pixel_values , labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : Union[str, Any] = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
a_ : List[Any] = (
{"""feature-extraction""": TFCvtModel, """image-classification""": TFCvtForImageClassification}
if is_tf_available()
else {}
)
a_ : Any = False
a_ : Dict = False
a_ : Optional[int] = False
a_ : List[Any] = False
a_ : List[Any] = False
def UpperCAmelCase__ ( self) ->List[str]:
a_ = TFCvtModelTester(self)
a_ = TFCvtConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37)
def UpperCAmelCase__ ( self) ->List[str]:
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="Cvt does not output attentions")
def UpperCAmelCase__ ( self) ->Dict:
pass
@unittest.skip(reason="Cvt does not use inputs_embeds")
def UpperCAmelCase__ ( self) ->List[str]:
pass
@unittest.skip(reason="Cvt does not support input and output embeddings")
def UpperCAmelCase__ ( self) ->Optional[Any]:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
def UpperCAmelCase__ ( self) ->Dict:
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def UpperCAmelCase__ ( self) ->List[str]:
super().test_keras_fit()
@unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8")
def UpperCAmelCase__ ( self) ->Dict:
a_ = tf.keras.mixed_precision.Policy("mixed_float16")
tf.keras.mixed_precision.set_global_policy(__UpperCAmelCase)
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("float32")
def UpperCAmelCase__ ( self) ->Optional[int]:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(__UpperCAmelCase)
a_ = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ = [*signature.parameters.keys()]
a_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Optional[int]:
def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase):
a_ = model_class(__UpperCAmelCase)
a_ = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase))
a_ = outputs.hidden_states
a_ = len(self.model_tester.depth)
self.assertEqual(len(__UpperCAmelCase) , __UpperCAmelCase)
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:]) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a_ = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Dict:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[str]:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase)
@slow
def UpperCAmelCase__ ( self) ->str:
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = TFCvtModel.from_pretrained(__UpperCAmelCase)
self.assertIsNotNone(__UpperCAmelCase)
def UpperCamelCase ( ) ->Dict:
"""simple docstring"""
a_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class snake_case ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self) ->int:
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
@slow
def UpperCAmelCase__ ( self) ->Any:
a_ = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
a_ = self.default_image_processor
a_ = prepare_img()
a_ = image_processor(images=__UpperCAmelCase , return_tensors="tf")
# forward pass
a_ = model(**__UpperCAmelCase)
# verify the logits
a_ = tf.TensorShape((1, 10_00))
self.assertEqual(outputs.logits.shape , __UpperCAmelCase)
a_ = tf.constant([0.9_285, 0.9_015, -0.3_150])
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __UpperCAmelCase , atol=1E-4))
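# --- Editor's illustration (not part of the original test file). ---
# The spatial size asserted in create_and_check_model above follows the standard
# convolutional output formula, applied once per CvT stage with the tester defaults.
from math import floor

def conv_output_size(size: int, patch: int, stride: int, padding: int) -> int:
    return floor((size + 2 * padding - patch) / stride + 1)

feature_size = 64                                    # tester default image_size
for patch, stride, padding in zip([7, 3, 3], [4, 2, 2], [2, 1, 1]):
    feature_size = conv_output_size(feature_size, patch, stride, padding)
print(feature_size)                                  # 4 -> last hidden state is (batch, 96, 4, 4)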
"""simple docstring"""
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase) ->Optional[Any]:
return F'''gaussian_noise_s={seed}_shape={"_".join([str(__UpperCAmelCase) for s in shape])}.npy'''
def UpperCAmelCase__ ( self) ->str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def UpperCAmelCase__ ( self , __UpperCAmelCase=0 , __UpperCAmelCase=(4, 4, 64, 64) , __UpperCAmelCase=False) ->str:
a_ = jnp.bfloataa if fpaa else jnp.floataa
a_ = jnp.array(load_hf_numpy(self.get_file_format(__UpperCAmelCase , __UpperCAmelCase)) , dtype=__UpperCAmelCase)
return image
def UpperCAmelCase__ ( self , __UpperCAmelCase=False , __UpperCAmelCase="CompVis/stable-diffusion-v1-4") ->Tuple:
a_ = jnp.bfloataa if fpaa else jnp.floataa
a_ = "bf16" if fpaa else None
a_ , a_ = FlaxUNetaDConditionModel.from_pretrained(
__UpperCAmelCase , subfolder="unet" , dtype=__UpperCAmelCase , revision=__UpperCAmelCase)
return model, params
def UpperCAmelCase__ ( self , __UpperCAmelCase=0 , __UpperCAmelCase=(4, 77, 7_68) , __UpperCAmelCase=False) ->Optional[int]:
a_ = jnp.bfloataa if fpaa else jnp.floataa
a_ = jnp.array(load_hf_numpy(self.get_file_format(__UpperCAmelCase , __UpperCAmelCase)) , dtype=__UpperCAmelCase)
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2_323, -0.1_304, 0.0_813, -0.3_093, -0.0_919, -0.1_571, -0.1_125, -0.5_806]],
[17, 0.55, [-0.0_831, -0.2_443, 0.0_901, -0.0_919, 0.3_396, 0.0_103, -0.3_743, 0.0_701]],
[8, 0.89, [-0.4_863, 0.0_859, 0.0_875, -0.1_658, 0.9_199, -0.0_114, 0.4_839, 0.4_639]],
[3, 10_00, [-0.5_649, 0.2_402, -0.5_518, 0.1_248, 1.1_328, -0.2_443, -0.0_325, -1.0_078]],
# fmt: on
])
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Tuple:
model , params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4" , fpaa=__UpperCAmelCase)
latents = self.get_latents(__UpperCAmelCase , fpaa=__UpperCAmelCase)
encoder_hidden_states = self.get_encoder_hidden_states(__UpperCAmelCase , fpaa=__UpperCAmelCase)
sample = model.apply(
{"params": params} , __UpperCAmelCase , jnp.array(__UpperCAmelCase , dtype=jnp.intaa) , encoder_hidden_states=__UpperCAmelCase , ).sample
assert sample.shape == latents.shape
a_ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())) , dtype=jnp.floataa)
a_ = jnp.array(__UpperCAmelCase , dtype=jnp.floataa)
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
assert jnp.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-2)
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1_514, 0.0_807, 0.1_624, 0.1_016, -0.1_896, 0.0_263, 0.0_677, 0.2_310]],
[17, 0.55, [0.1_164, -0.0_216, 0.0_170, 0.1_589, -0.3_120, 0.1_005, -0.0_581, -0.1_458]],
[8, 0.89, [-0.1_758, -0.0_169, 0.1_004, -0.1_411, 0.1_312, 0.1_103, -0.1_996, 0.2_139]],
[3, 10_00, [0.1_214, 0.0_352, -0.0_731, -0.1_562, -0.0_994, -0.0_906, -0.2_340, -0.0_539]],
# fmt: on
])
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->List[Any]:
model , params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2" , fpaa=__UpperCAmelCase)
latents = self.get_latents(__UpperCAmelCase , shape=(4, 4, 96, 96) , fpaa=__UpperCAmelCase)
encoder_hidden_states = self.get_encoder_hidden_states(__UpperCAmelCase , shape=(4, 77, 10_24) , fpaa=__UpperCAmelCase)
sample = model.apply(
{"params": params} , __UpperCAmelCase , jnp.array(__UpperCAmelCase , dtype=jnp.intaa) , encoder_hidden_states=__UpperCAmelCase , ).sample
assert sample.shape == latents.shape
a_ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())) , dtype=jnp.floataa)
a_ = jnp.array(__UpperCAmelCase , dtype=jnp.floataa)
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-2)
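# --- Editor's usage sketch (not part of the original test file). ---
# Shape-level sketch of the UNet call pattern exercised above; the checkpoint id comes
# from the tests, and the Flax weights must be downloaded to actually run it.
import jax.numpy as jnp
from diffusers import FlaxUNet2DConditionModel

unet, unet_params = FlaxUNet2DConditionModel.from_pretrained(
    "CompVis/stable-diffusion-v1-4", subfolder="unet"
)
noisy_latents = jnp.zeros((4, 4, 64, 64), dtype=jnp.float32)
text_embeddings = jnp.zeros((4, 77, 768), dtype=jnp.float32)
timestep = jnp.array(4, dtype=jnp.int32)
pred = unet.apply(
    {"params": unet_params}, noisy_latents, timestep, encoder_hidden_states=text_embeddings
).sample
assert pred.shape == noisy_latents.shape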
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : Dict = """Speech2TextFeatureExtractor"""
a_ : str = """Speech2TextTokenizer"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase) ->List[str]:
super().__init__(__UpperCAmelCase , __UpperCAmelCase)
a_ = self.feature_extractor
a_ = False
def __call__( self , *__UpperCAmelCase , **__UpperCAmelCase) ->Optional[int]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__UpperCAmelCase , **__UpperCAmelCase)
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
a_ = kwargs.pop("raw_speech")
else:
a_ = kwargs.pop("audio" , __UpperCAmelCase)
a_ = kwargs.pop("sampling_rate" , __UpperCAmelCase)
a_ = kwargs.pop("text" , __UpperCAmelCase)
if len(__UpperCAmelCase) > 0:
a_ = args[0]
a_ = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process.")
if audio is not None:
a_ = self.feature_extractor(__UpperCAmelCase , *__UpperCAmelCase , sampling_rate=__UpperCAmelCase , **__UpperCAmelCase)
if text is not None:
a_ = self.tokenizer(__UpperCAmelCase , **__UpperCAmelCase)
if text is None:
return inputs
elif audio is None:
return encodings
else:
a_ = encodings["input_ids"]
return inputs
def UpperCAmelCase__ ( self , *__UpperCAmelCase , **__UpperCAmelCase) ->str:
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase)
def UpperCAmelCase__ ( self , *__UpperCAmelCase , **__UpperCAmelCase) ->int:
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase)
@contextmanager
def UpperCAmelCase__ ( self) ->Tuple:
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call.")
a_ = True
a_ = self.tokenizer
yield
a_ = self.feature_extractor
a_ = False
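# --- Editor's usage sketch (not part of the original file). ---
# The deprecation warning above points to passing transcripts via `text` in the same
# call instead of `as_target_processor`. The checkpoint id and silent audio are assumptions.
import numpy as np
from transformers import Speech2TextProcessor

s2t_processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
speech = np.zeros(16000, dtype=np.float32)             # one second of silence at 16 kHz
batch = s2t_processor(audio=speech, sampling_rate=16000, text="hello world")
print(sorted(batch.keys()))                            # audio features plus tokenized labels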
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
UpperCamelCase_ = logging.get_logger(__name__)
@dataclass
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : List[str] = [
"""no_inference""",
"""no_cuda""",
"""no_tpu""",
"""no_speed""",
"""no_memory""",
"""no_env_print""",
"""no_multi_process""",
]
def __init__( self , **__UpperCAmelCase) ->Dict:
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
a_ = deprecated_arg[3:]
setattr(self , __UpperCAmelCase , not kwargs.pop(__UpperCAmelCase))
logger.warning(
F'''{deprecated_arg} is depreciated. Please use --no_{positive_arg} or'''
F''' {positive_arg}={kwargs[positive_arg]}''')
a_ = kwargs.pop("torchscript" , self.torchscript)
a_ = kwargs.pop("torch_xla_tpu_print_metrics" , self.torch_xla_tpu_print_metrics)
a_ = kwargs.pop("fp16_opt_level" , self.fpaa_opt_level)
super().__init__(**__UpperCAmelCase)
a_ : bool = field(default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Trace the models using torchscript"""} )
a_ : bool = field(default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Print Xla/PyTorch tpu metrics"""} )
a_ : str = field(
default="""O1""" , metadata={
"""help""": (
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. """
"""See details at https://nvidia.github.io/apex/amp.html"""
)
} , )
@cached_property
def UpperCAmelCase__ ( self) ->Tuple["torch.device", int]:
requires_backends(self , ["torch"])
logger.info("PyTorch: setting up devices")
if not self.cuda:
a_ = torch.device("cpu")
a_ = 0
elif is_torch_tpu_available():
a_ = xm.xla_device()
a_ = 0
else:
a_ = torch.device("cuda" if torch.cuda.is_available() else "cpu")
a_ = torch.cuda.device_count()
return device, n_gpu
@property
def UpperCAmelCase__ ( self) ->Any:
return is_torch_tpu_available() and self.tpu
@property
def UpperCAmelCase__ ( self) ->int:
requires_backends(self , ["torch"])
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def UpperCAmelCase__ ( self) ->"torch.device":
requires_backends(self , ["torch"])
return self._setup_devices[0]
@property
def UpperCAmelCase__ ( self) ->Optional[int]:
requires_backends(self , ["torch"])
return self._setup_devices[1]
@property
def UpperCAmelCase__ ( self) ->Optional[Any]:
return self.n_gpu > 0
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {
'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
'LiltForQuestionAnswering',
'LiltForSequenceClassification',
'LiltForTokenClassification',
'LiltModel',
'LiltPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
UpperCamelCase_ = numpy.array([0, 0])
UpperCamelCase_ = numpy.array([0.5, 0.8_66_02_54])
UpperCamelCase_ = numpy.array([1, 0])
UpperCamelCase_ = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->list[numpy.ndarray]:
"""simple docstring"""
vectors = initial_vectors
for _ in range(UpperCAmelCase ):
vectors = iteration_step(vectors )
return vectors
def UpperCamelCase ( UpperCAmelCase ) ->list[numpy.ndarray]:
"""simple docstring"""
new_vectors = []
for i, start_vector in enumerate(vectors[:-1] ):
end_vector = vectors[i + 1]
new_vectors.append(start_vector )
difference_vector = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->numpy.ndarray:
"""simple docstring"""
theta = numpy.radians(UpperCAmelCase )
c , s = numpy.cos(theta ), numpy.sin(theta )
rotation_matrix = numpy.array(((c, -s), (s, c)) )
return numpy.dot(rotation_matrix , UpperCAmelCase )
def UpperCamelCase ( UpperCAmelCase ) ->None:
"""simple docstring"""
a_ = plt.gca()
axes.set_aspect("equal" )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
x_coordinates , y_coordinates = zip(*UpperCAmelCase )
plt.plot(x_coordinates , y_coordinates )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase_ = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
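# --- Editor's illustration (not part of the original script). ---
# Each iteration replaces every segment with four, so the point count grows as
# 4 * (n - 1) + 1 per step; starting from the 4-point triangle this is 3 * 4**k + 1.
def koch_point_count(initial_points: int, steps: int) -> int:
    points = initial_points
    for _ in range(steps):
        points = 4 * (points - 1) + 1
    return points

print(koch_point_count(4, 5))   # 3073 points after the 5 iterations plotted above
print(3 * 4**5 + 1)             # 3073, the closed form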
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 303
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class snake_case ( SCREAMING_SNAKE_CASE_ ):
    def __init__( self , transformer , vae , scheduler , id2label = None , ) ->None:
        super().__init__()
        self.register_modules(transformer=transformer , vae=vae , scheduler=scheduler)
        # create an imagenet label -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.strip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->List[int]:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase):
a_ = list(__UpperCAmelCase)
for l in label:
if l not in self.labels:
raise ValueError(
F'''{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.''')
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self , __UpperCAmelCase , __UpperCAmelCase = 4.0 , __UpperCAmelCase = None , __UpperCAmelCase = 50 , __UpperCAmelCase = "pil" , __UpperCAmelCase = True , ) ->Union[ImagePipelineOutput, Tuple]:
a_ = len(__UpperCAmelCase)
a_ = self.transformer.config.sample_size
a_ = self.transformer.config.in_channels
a_ = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=__UpperCAmelCase , device=self.device , dtype=self.transformer.dtype , )
a_ = torch.cat([latents] * 2) if guidance_scale > 1 else latents
a_ = torch.tensor(__UpperCAmelCase , device=self.device).reshape(-1)
a_ = torch.tensor([10_00] * batch_size , device=self.device)
a_ = torch.cat([class_labels, class_null] , 0) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(__UpperCAmelCase)
for t in self.progress_bar(self.scheduler.timesteps):
if guidance_scale > 1:
a_ = latent_model_input[: len(__UpperCAmelCase) // 2]
a_ = torch.cat([half, half] , dim=0)
a_ = self.scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase)
            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps , float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps] , dtype=dtype , device=latent_model_input.device)
elif len(timesteps.shape) == 0:
a_ = timesteps[None].to(latent_model_input.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
a_ = timesteps.expand(latent_model_input.shape[0])
# predict noise model_output
a_ = self.transformer(
__UpperCAmelCase , timestep=__UpperCAmelCase , class_labels=__UpperCAmelCase).sample
# perform guidance
if guidance_scale > 1:
a_ , a_ = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
a_ , a_ = torch.split(__UpperCAmelCase , len(__UpperCAmelCase) // 2 , dim=0)
a_ = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
a_ = torch.cat([half_eps, half_eps] , dim=0)
a_ = torch.cat([eps, rest] , dim=1)
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
a_ , a_ = torch.split(__UpperCAmelCase , __UpperCAmelCase , dim=1)
else:
a_ = noise_pred
# compute previous image: x_t -> x_t-1
a_ = self.scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase).prev_sample
if guidance_scale > 1:
a_ , a_ = latent_model_input.chunk(2 , dim=0)
else:
a_ = latent_model_input
a_ = 1 / self.vae.config.scaling_factor * latents
a_ = self.vae.decode(__UpperCAmelCase).sample
a_ = (samples / 2 + 0.5).clamp(0 , 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
a_ = samples.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
a_ = self.numpy_to_pil(__UpperCAmelCase)
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=__UpperCAmelCase)
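# Added usage sketch (not part of the original module): assuming this class corresponds to
# diffusers' DiTPipeline, a minimal call could look like the commented lines below. The checkpoint
# id "facebook/DiT-XL-2-256" and the label names are illustrative assumptions.
#   import torch
#   from diffusers import DiTPipeline, DPMSolverMultistepScheduler
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
#   pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
#   class_ids = pipe.get_label_ids(["white shark", "umbrella"])
#   images = pipe(class_labels=class_ids, num_inference_steps=25).images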
| 303
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCamelCase_ = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 303
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {'configuration_ibert': ['IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'IBertConfig', 'IBertOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'IBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'IBertForMaskedLM',
'IBertForMultipleChoice',
'IBertForQuestionAnswering',
'IBertForSequenceClassification',
'IBertForTokenClassification',
'IBertModel',
'IBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 303
|
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    """simple docstring"""
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = F'''{src_lang}-{tgt_lang}'''
    readme = F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
    model_card_dir.mkdir(parents=True , exist_ok=True )
    path = os.path.join(model_card_dir , "README.md" )
    print(F'''Generating {path}''' )
    with open(path , "w" , encoding="utf-8" ) as f:
        f.write(readme )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / 'model_cards'
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / 'allenai' / model_name
write_model_card(model_card_dir, src_lang='en', tgt_lang='de', model_name=model_name)
| 303
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OPTForCausalLM',
'OPTModel',
'OPTPreTrainedModel',
'OPTForSequenceClassification',
'OPTForQuestionAnswering',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'FlaxOPTForCausalLM',
'FlaxOPTModel',
'FlaxOPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 303
|
"""simple docstring"""
def binary_exponentiation(a, n, mod) ->int:
    """simple docstring"""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a , n - 1 , mod ) * a) % mod
    else:
        b = binary_exponentiation(a , n // 2 , mod )
        return (b * b) % mod
# a prime number
p = 701
a = 1000000000
b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
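# Added note: for a prime p and b not divisible by p, Fermat's little theorem gives b**(p - 1) % p == 1,
# so b**(p - 2) % p is the modular multiplicative inverse of b. Dividing by b modulo p is therefore the
# same as multiplying by binary_exponentiation(b, p - 2, p), which is what the checks above rely on.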
| 303
| 1
|
"""simple docstring"""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
UpperCamelCase_ = get_logger(__name__)
class snake_case :
a_ : Union[str, Any] = """dummy_data"""
a_ : Optional[int] = """datasets"""
a_ : Optional[Any] = False
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = None , ) ->Union[str, Any]:
a_ = 0
a_ = dataset_name
a_ = cache_dir
a_ = use_local_dummy_data
a_ = config
# download_callbacks take a single url as input
a_ = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
a_ = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
a_ = str(__UpperCAmelCase)
# to be downloaded
a_ = None
a_ = None
@property
def UpperCAmelCase__ ( self) ->Union[str, Any]:
if self._dummy_file is None:
a_ = self.download_dummy_data()
return self._dummy_file
@property
def UpperCAmelCase__ ( self) ->Any:
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("dummy" , self.config.name , self.version_name)
# structure is dummy / version_name
return os.path.join("dummy" , self.version_name)
@property
def UpperCAmelCase__ ( self) ->Any:
return os.path.join(self.dummy_data_folder , "dummy_data.zip")
def UpperCAmelCase__ ( self) ->Dict:
a_ = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
a_ = cached_path(
__UpperCAmelCase , cache_dir=self.cache_dir , extract_compressed_file=__UpperCAmelCase , force_extract=__UpperCAmelCase)
return os.path.join(__UpperCAmelCase , self.dummy_file_name)
@property
def UpperCAmelCase__ ( self) ->List[str]:
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file)
@property
def UpperCAmelCase__ ( self) ->Any:
if self._bucket_url is None:
a_ = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/"))
return self._bucket_url
@property
def UpperCAmelCase__ ( self) ->int:
# return full path if its a dir
if os.path.isdir(self.dummy_file):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , "/").split("/")[:-1])
def UpperCAmelCase__ ( self , __UpperCAmelCase , *__UpperCAmelCase) ->Any:
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
a_ = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
a_ = self.dummy_file_name
# special case when data_url is a dict
if isinstance(__UpperCAmelCase , __UpperCAmelCase):
return self.create_dummy_data_dict(__UpperCAmelCase , __UpperCAmelCase)
elif isinstance(__UpperCAmelCase , (list, tuple)):
return self.create_dummy_data_list(__UpperCAmelCase , __UpperCAmelCase)
else:
return self.create_dummy_data_single(__UpperCAmelCase , __UpperCAmelCase)
def UpperCAmelCase__ ( self , __UpperCAmelCase , *__UpperCAmelCase) ->List[Any]:
return self.download_and_extract(__UpperCAmelCase)
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase) ->Optional[Any]:
return self.download_and_extract(__UpperCAmelCase)
def UpperCAmelCase__ ( self , __UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase) ->Optional[Any]:
return path
def UpperCAmelCase__ ( self) ->List[Any]:
return {}
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase) ->Tuple:
a_ = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(__UpperCAmelCase , __UpperCAmelCase):
for single_url in single_urls:
download_callback(__UpperCAmelCase)
else:
a_ = single_urls
download_callback(__UpperCAmelCase)
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(__UpperCAmelCase , __UpperCAmelCase):
a_ = [os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(Path(__UpperCAmelCase).name)) for x in single_urls]
else:
a_ = single_urls
a_ = os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(Path(__UpperCAmelCase).name))
a_ = value
# make sure that values are unique
if all(isinstance(__UpperCAmelCase , __UpperCAmelCase) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
dummy_data_dict.values()):
# append key to value to make its name unique
a_ = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase) ->List[str]:
a_ = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
a_ = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , __UpperCAmelCase)) for url in data_url)
a_ = all(
url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url)
if data_url and (is_tf_records or is_pubmed_records):
a_ = [data_url[0]] * len(__UpperCAmelCase)
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(__UpperCAmelCase)
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
a_ = os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(single_url.split("/")[-1]))
dummy_data_list.append(__UpperCAmelCase)
return dummy_data_list
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase) ->Optional[int]:
for download_callback in self.download_callbacks:
download_callback(__UpperCAmelCase)
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
a_ = os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(data_url.split("/")[-1]))
if os.path.exists(__UpperCAmelCase) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def UpperCAmelCase__ ( self) ->Optional[Any]:
pass
def UpperCAmelCase__ ( self) ->int:
pass
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->str:
def _iter_archive_members(__UpperCAmelCase):
# this preserves the order of the members inside the ZIP archive
a_ = Path(self.dummy_file).parent
a_ = path.relative_to(__UpperCAmelCase)
with ZipFile(self.local_path_to_dummy_data) as zip_file:
a_ = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix()):
yield dummy_parent_path.joinpath(__UpperCAmelCase)
a_ = Path(__UpperCAmelCase)
a_ = _iter_archive_members(__UpperCAmelCase) if self.use_local_dummy_data else path.rglob("*")
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((".", "__")):
yield file_path.relative_to(__UpperCAmelCase).as_posix(), file_path.open("rb")
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->str:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase):
a_ = [paths]
for path in paths:
if os.path.isfile(__UpperCAmelCase):
if os.path.basename(__UpperCAmelCase).startswith((".", "__")):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(__UpperCAmelCase):
if os.path.basename(__UpperCAmelCase).startswith((".", "__")):
continue
dirnames.sort()
for filename in sorted(__UpperCAmelCase):
if filename.startswith((".", "__")):
continue
yield os.path.join(__UpperCAmelCase , __UpperCAmelCase)
| 303
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase) ->None:
warnings.warn(
"The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use YolosImageProcessor instead." , __UpperCAmelCase , )
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase)
| 303
| 1
|
"""simple docstring"""
from __future__ import annotations
import math
def ucal(u: float, p: int) ->float:
    """simple docstring"""
    temp = u
    for i in range(1 , p ):
        temp = temp * (u - i)
    return temp
def main( ) ->None:
"""simple docstring"""
a_ = int(input("enter the numbers of values: " ) )
a_ = []
for _ in range(UpperCAmelCase ):
y.append([] )
for i in range(UpperCAmelCase ):
for j in range(UpperCAmelCase ):
y[i].append(UpperCAmelCase )
a_ = 0
print("enter the values of parameters in a list: " )
a_ = list(map(UpperCAmelCase , input().split() ) )
print("enter the values of corresponding parameters: " )
for i in range(UpperCAmelCase ):
a_ = float(input() )
a_ = int(input("enter the value to interpolate: " ) )
a_ = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , UpperCAmelCase ):
for j in range(n - i ):
a_ = y[j + 1][i - 1] - y[j][i - 1]
a_ = y[0][0]
for i in range(1 , UpperCAmelCase ):
summ += (ucal(UpperCAmelCase , UpperCAmelCase ) * y[0][i]) / math.factorial(UpperCAmelCase )
print(F'''the value at {value} is {summ}''' )
if __name__ == "__main__":
main()
| 303
|
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self) ->Dict:
a_ = inspect.getfile(accelerate.test_utils)
a_ = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
a_ = os.path.sep.join(
mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"])
a_ = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])
@require_multi_gpu
def UpperCAmelCase__ ( self) ->Any:
print(F'''Found {torch.cuda.device_count()} devices.''')
a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy())
@require_multi_gpu
def UpperCAmelCase__ ( self) ->str:
print(F'''Found {torch.cuda.device_count()} devices.''')
a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
print(F'''Command: {cmd}''')
with patch_environment(omp_num_threads=1):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy())
@require_multi_gpu
def UpperCAmelCase__ ( self) ->Optional[int]:
a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__)]
with patch_environment(omp_num_threads=1):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy())
@require_multi_gpu
def UpperCAmelCase__ ( self) ->List[Any]:
print(F'''Found {torch.cuda.device_count()} devices, using 2 devices only''')
a_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1"):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ''
    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += F"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."
    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += F"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."
    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 303
| 1
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
a_ : Any = ["""note_seq"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase) ->Union[str, Any]:
requires_backends(self , ["note_seq"])
@classmethod
def UpperCAmelCase__ ( cls , *__UpperCAmelCase , **__UpperCAmelCase) ->List[Any]:
requires_backends(cls , ["note_seq"])
@classmethod
def UpperCAmelCase__ ( cls , *__UpperCAmelCase , **__UpperCAmelCase) ->Dict:
requires_backends(cls , ["note_seq"])
| 303
|
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def grid_shortest_path(grid , source , destination , allow_diagonal , ) ->tuple[float | int, list[tuple[int, int]]]:
    """simple docstring"""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]
    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols) , np.inf )
    matrix[source] = 0
    predecessors = np.empty((rows, cols) , dtype=object )
    predecessors[source] = None
    while queue:
        dist, (x, y) = heappop(queue )
        if (x, y) in visited:
            continue
        visited.add((x, y) )
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y) )
                x, y = predecessors[x, y]
            path.append(source ) # add the source manually
            path.reverse()
            return matrix[destination], path
        for i in range(len(dx ) ):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue , (dist + 1, (nx, ny)) )
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)
    return np.inf, []
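# Added usage sketch (illustrative only; `grid_shortest_path` is a descriptive name chosen here,
# not necessarily the original one):
#   grid = np.array([[1, 1, 1], [0, 1, 0], [1, 1, 1]])  # 1 = walkable cell, 0 = blocked
#   distance, path = grid_shortest_path(grid, (0, 0), (2, 2), allow_diagonal=False)
#   # `distance` counts unit steps (np.inf if the destination is unreachable) and `path` lists the
#   # cells from source to destination, inclusive.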
if __name__ == "__main__":
import doctest
doctest.testmod()
| 303
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'studio-ousia/luke-base': 'https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json',
'studio-ousia/luke-large': 'https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json',
}
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : Optional[int] = """luke"""
def __init__( self , __UpperCAmelCase=5_02_67 , __UpperCAmelCase=50_00_00 , __UpperCAmelCase=7_68 , __UpperCAmelCase=2_56 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , **__UpperCAmelCase , ) ->Tuple:
super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase)
a_ = vocab_size
a_ = entity_vocab_size
a_ = hidden_size
a_ = entity_emb_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = hidden_act
a_ = intermediate_size
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = initializer_range
a_ = layer_norm_eps
a_ = use_entity_aware_attention
a_ = classifier_dropout
| 303
|
"""simple docstring"""
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
UpperCamelCase_ = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
UpperCamelCase_ = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class snake_case :
def __init__( self) ->Optional[int]:
a_ = WATERMARK_BITS
a_ = WatermarkEncoder()
self.encoder.set_watermark("bits" , self.watermark)
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Optional[int]:
# can't encode images that are smaller than 256
if images.shape[-1] < 2_56:
return images
a_ = (2_55 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1).float().numpy()
a_ = [self.encoder.encode(__UpperCAmelCase , "dwtDct") for image in images]
a_ = torch.from_numpy(np.array(__UpperCAmelCase)).permute(0 , 3 , 1 , 2)
a_ = torch.clamp(2 * (images / 2_55 - 0.5) , min=-1.0 , max=1.0)
return images
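# Added note (names below are assumptions, since the class and method above are obfuscated): in
# diffusers this helper is usually exposed as `StableDiffusionXLWatermarker.apply_watermark(images)`.
# From the code above: `images` is expected to be a float tensor in [-1, 1] with shape
# (batch, channels, height, width); the WATERMARK_BITS payload is embedded with invisible-watermark's
# "dwtDct" codec, images narrower than 256 px are returned untouched, and the result is clamped back
# to the [-1, 1] range.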
| 303
| 1
|
"""simple docstring"""
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
    url = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
    res = requests.get(url, headers={'UserAgent': UserAgent().random})
    # res.raise_for_status()
    with open('project1a.html', 'wb') as out_file: # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, 'html.parser')
    links = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(F"""https://google.com{link.get('href')}""")
| 303
|
"""simple docstring"""
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
def solution(num_picked: int = 20) ->str:
    """simple docstring"""
    # By linearity of expectation, each colour is present among the picked balls with probability
    # 1 - C(NUM_BALLS - BALLS_PER_COLOUR, num_picked) / C(NUM_BALLS, num_picked).
    total = math.comb(NUM_BALLS , num_picked )
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR , num_picked )
    result = NUM_COLOURS * (1 - missing_colour / total)
    return F'''{result:.9f}'''
if __name__ == "__main__":
print(solution(20))
| 303
| 1
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def UpperCAmelCase__ ( self) ->Any:
a_ = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(__UpperCAmelCase , "embed_dim"))
self.parent.assertTrue(hasattr(__UpperCAmelCase , "num_heads"))
class snake_case :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=64 , __UpperCAmelCase=3 , __UpperCAmelCase=[16, 48, 96] , __UpperCAmelCase=[1, 3, 6] , __UpperCAmelCase=[1, 2, 10] , __UpperCAmelCase=[7, 3, 3] , __UpperCAmelCase=[4, 2, 2] , __UpperCAmelCase=[2, 1, 1] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[False, False, True] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=2 , ) ->Optional[int]:
a_ = parent
a_ = batch_size
a_ = image_size
a_ = patch_sizes
a_ = patch_stride
a_ = patch_padding
a_ = is_training
a_ = use_labels
a_ = num_labels
a_ = num_channels
a_ = embed_dim
a_ = num_heads
a_ = stride_kv
a_ = depth
a_ = cls_token
a_ = attention_drop_rate
a_ = initializer_range
a_ = layer_norm_eps
def UpperCAmelCase__ ( self) ->Any:
a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a_ = None
if self.use_labels:
# create a random int32 tensor of given shape
a_ = ids_tensor([self.batch_size] , self.num_labels)
a_ = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self) ->Union[str, Any]:
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Optional[Any]:
a_ = TFCvtModel(config=__UpperCAmelCase)
a_ = model(__UpperCAmelCase , training=__UpperCAmelCase)
a_ = (self.image_size, self.image_size)
a_ , a_ = image_size[0], image_size[1]
for i in range(len(self.depth)):
a_ = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
a_ = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->str:
a_ = self.num_labels
a_ = TFCvtForImageClassification(__UpperCAmelCase)
a_ = model(__UpperCAmelCase , labels=__UpperCAmelCase , training=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase__ ( self) ->Tuple:
a_ = self.prepare_config_and_inputs()
a_ , a_ , a_ = config_and_inputs
a_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : Union[str, Any] = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
a_ : List[Any] = (
{"""feature-extraction""": TFCvtModel, """image-classification""": TFCvtForImageClassification}
if is_tf_available()
else {}
)
a_ : Any = False
a_ : Dict = False
a_ : Optional[int] = False
a_ : List[Any] = False
a_ : List[Any] = False
def UpperCAmelCase__ ( self) ->List[str]:
a_ = TFCvtModelTester(self)
a_ = TFCvtConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37)
def UpperCAmelCase__ ( self) ->List[str]:
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="Cvt does not output attentions")
def UpperCAmelCase__ ( self) ->Dict:
pass
@unittest.skip(reason="Cvt does not use inputs_embeds")
def UpperCAmelCase__ ( self) ->List[str]:
pass
@unittest.skip(reason="Cvt does not support input and output embeddings")
def UpperCAmelCase__ ( self) ->Optional[Any]:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
def UpperCAmelCase__ ( self) ->Dict:
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def UpperCAmelCase__ ( self) ->List[str]:
super().test_keras_fit()
@unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8")
def UpperCAmelCase__ ( self) ->Dict:
a_ = tf.keras.mixed_precision.Policy("mixed_float16")
tf.keras.mixed_precision.set_global_policy(__UpperCAmelCase)
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("float32")
def UpperCAmelCase__ ( self) ->Optional[int]:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(__UpperCAmelCase)
a_ = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ = [*signature.parameters.keys()]
a_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Optional[int]:
def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase):
a_ = model_class(__UpperCAmelCase)
a_ = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase))
a_ = outputs.hidden_states
a_ = len(self.model_tester.depth)
self.assertEqual(len(__UpperCAmelCase) , __UpperCAmelCase)
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:]) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a_ = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Dict:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[str]:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase)
@slow
def UpperCAmelCase__ ( self) ->str:
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = TFCvtModel.from_pretrained(__UpperCAmelCase)
self.assertIsNotNone(__UpperCAmelCase)
def UpperCamelCase ( ) ->Dict:
"""simple docstring"""
a_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class snake_case ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self) ->int:
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
@slow
def UpperCAmelCase__ ( self) ->Any:
a_ = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
a_ = self.default_image_processor
a_ = prepare_img()
a_ = image_processor(images=__UpperCAmelCase , return_tensors="tf")
# forward pass
a_ = model(**__UpperCAmelCase)
# verify the logits
a_ = tf.TensorShape((1, 10_00))
self.assertEqual(outputs.logits.shape , __UpperCAmelCase)
a_ = tf.constant([0.9_285, 0.9_015, -0.3_150])
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __UpperCAmelCase , atol=1E-4))
| 303
|
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
UpperCamelCase_ = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def UpperCamelCase ( UpperCAmelCase ) ->Optional[Any]:
"""simple docstring"""
for pegasus_name, hf_name in PATTERNS:
a_ = k.replace(UpperCAmelCase , UpperCAmelCase )
return k
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->PegasusForConditionalGeneration:
"""simple docstring"""
a_ = DEFAULTS.copy()
cfg_kwargs.update(UpperCAmelCase )
a_ = PegasusConfig(**UpperCAmelCase )
a_ = PegasusForConditionalGeneration(UpperCAmelCase )
a_ = torch_model.model.state_dict()
a_ = {}
for k, v in tf_weights.items():
a_ = rename_state_dict_key(UpperCAmelCase )
if new_k not in sd:
raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''' )
if "dense" in k or "proj" in new_k:
a_ = v.T
a_ = torch.tensor(UpperCAmelCase , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, F'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}'''
# make sure embedding.padding_idx is respected
a_ = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1] )
a_ = mapping["shared.weight"]
a_ = mapping["shared.weight"]
a_ = {k: torch.zeros_like(UpperCAmelCase ) for k, v in sd.items() if k.endswith("bias" ) and k not in mapping}
mapping.update(**UpperCAmelCase )
a_ , a_ = torch_model.model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase )
a_ = [
k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
]
assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], F'''no matches found for the following tf keys {extra}'''
return torch_model
def UpperCamelCase ( UpperCAmelCase="./ckpt/aeslc/model.ckpt-32000" ) ->Dict:
"""simple docstring"""
a_ = tf.train.list_variables(UpperCAmelCase )
a_ = {}
a_ = ["Adafactor", "global_step"]
for name, shape in tqdm(UpperCAmelCase , desc="converting tf checkpoint to dict" ):
a_ = any(pat in name for pat in ignore_name )
if skip_key:
continue
a_ = tf.train.load_variable(UpperCAmelCase , UpperCAmelCase )
a_ = array
return tf_weights
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
a_ = Path(UpperCAmelCase ).parent.name
a_ = task_specific_params[F'''summarization_{dataset}''']["max_position_embeddings"]
a_ = PegasusTokenizer.from_pretrained("sshleifer/pegasus" , model_max_length=UpperCAmelCase )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(UpperCAmelCase )
# convert model
a_ = get_tf_weights_as_numpy(UpperCAmelCase )
a_ = task_specific_params[F'''summarization_{dataset}''']
if dataset == "large":
a_ = task_specific_params
a_ = convert_pegasus(UpperCAmelCase , UpperCAmelCase )
torch_model.save_pretrained(UpperCAmelCase )
a_ = torch_model.state_dict()
sd.pop("model.decoder.embed_positions.weight" )
sd.pop("model.encoder.embed_positions.weight" )
torch.save(UpperCAmelCase , Path(UpperCAmelCase ) / "pytorch_model.bin" )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.')
UpperCamelCase_ = parser.parse_args()
if args.save_dir is None:
UpperCamelCase_ = Path(args.tf_ckpt_path).parent.name
UpperCamelCase_ = os.path.join('pegasus', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 303
| 1
|
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : List[Any] = (UniPCMultistepScheduler,)
a_ : Optional[Any] = (("""num_inference_steps""", 25),)
def UpperCAmelCase__ ( self , **__UpperCAmelCase) ->Tuple:
a_ = {
"num_train_timesteps": 10_00,
"beta_start": 0.0_001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
"solver_type": "bh2",
}
config.update(**__UpperCAmelCase)
return config
def UpperCAmelCase__ ( self , __UpperCAmelCase=0 , **__UpperCAmelCase) ->Dict:
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , __UpperCAmelCase)
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config(**__UpperCAmelCase)
a_ = scheduler_class(**__UpperCAmelCase)
scheduler.set_timesteps(__UpperCAmelCase)
# copy over dummy past residuals
a_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCAmelCase)
a_ = scheduler_class.from_pretrained(__UpperCAmelCase)
new_scheduler.set_timesteps(__UpperCAmelCase)
# copy over dummy past residuals
a_ = dummy_past_residuals[: new_scheduler.config.solver_order]
a_ , a_ = sample, sample
for t in range(__UpperCAmelCase , time_step + scheduler.config.solver_order + 1):
a_ = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
a_ = new_scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self , __UpperCAmelCase=0 , **__UpperCAmelCase) ->Union[str, Any]:
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , __UpperCAmelCase)
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config()
a_ = scheduler_class(**__UpperCAmelCase)
scheduler.set_timesteps(__UpperCAmelCase)
# copy over dummy past residuals (must be after setting timesteps)
a_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCAmelCase)
a_ = scheduler_class.from_pretrained(__UpperCAmelCase)
# copy over dummy past residuals
new_scheduler.set_timesteps(__UpperCAmelCase)
# copy over dummy past residual (must be after setting timesteps)
a_ = dummy_past_residuals[: new_scheduler.config.solver_order]
a_ = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
a_ = new_scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self , __UpperCAmelCase=None , **__UpperCAmelCase) ->str:
if scheduler is None:
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config(**__UpperCAmelCase)
a_ = scheduler_class(**__UpperCAmelCase)
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config(**__UpperCAmelCase)
a_ = scheduler_class(**__UpperCAmelCase)
a_ = 10
a_ = self.dummy_model()
a_ = self.dummy_sample_deter
scheduler.set_timesteps(__UpperCAmelCase)
for i, t in enumerate(scheduler.timesteps):
a_ = model(__UpperCAmelCase , __UpperCAmelCase)
a_ = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase).prev_sample
return sample
def UpperCAmelCase__ ( self) ->Union[str, Any]:
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , __UpperCAmelCase)
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config()
a_ = scheduler_class(**__UpperCAmelCase)
a_ = self.dummy_sample
a_ = 0.1 * sample
if num_inference_steps is not None and hasattr(__UpperCAmelCase , "set_timesteps"):
scheduler.set_timesteps(__UpperCAmelCase)
elif num_inference_steps is not None and not hasattr(__UpperCAmelCase , "set_timesteps"):
a_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
a_ = [residual + 0.2, residual + 0.15, residual + 0.10]
a_ = dummy_past_residuals[: scheduler.config.solver_order]
a_ = scheduler.timesteps[5]
a_ = scheduler.timesteps[6]
a_ = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
a_ = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def UpperCAmelCase__ ( self) ->List[Any]:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
a_ = UniPCMultistepScheduler(**self.get_scheduler_config())
a_ = self.full_loop(scheduler=__UpperCAmelCase)
a_ = torch.mean(torch.abs(__UpperCAmelCase))
assert abs(result_mean.item() - 0.2_464) < 1E-3
a_ = DPMSolverSinglestepScheduler.from_config(scheduler.config)
a_ = DEISMultistepScheduler.from_config(scheduler.config)
a_ = DPMSolverMultistepScheduler.from_config(scheduler.config)
a_ = UniPCMultistepScheduler.from_config(scheduler.config)
a_ = self.full_loop(scheduler=__UpperCAmelCase)
a_ = torch.mean(torch.abs(__UpperCAmelCase))
assert abs(result_mean.item() - 0.2_464) < 1E-3
def UpperCAmelCase__ ( self) ->str:
for timesteps in [25, 50, 1_00, 9_99, 10_00]:
self.check_over_configs(num_train_timesteps=__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[str]:
self.check_over_configs(thresholding=__UpperCAmelCase)
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__UpperCAmelCase , prediction_type=__UpperCAmelCase , sample_max_value=__UpperCAmelCase , solver_order=__UpperCAmelCase , solver_type=__UpperCAmelCase , )
def UpperCAmelCase__ ( self) ->List[Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Optional[int]:
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__UpperCAmelCase , solver_type=__UpperCAmelCase , prediction_type=__UpperCAmelCase , )
a_ = self.full_loop(
solver_order=__UpperCAmelCase , solver_type=__UpperCAmelCase , prediction_type=__UpperCAmelCase , )
assert not torch.isnan(__UpperCAmelCase).any(), "Samples have nan numbers"
def UpperCAmelCase__ ( self) ->List[str]:
self.check_over_configs(lower_order_final=__UpperCAmelCase)
self.check_over_configs(lower_order_final=__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->int:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]:
self.check_over_forward(num_inference_steps=__UpperCAmelCase , time_step=0)
def UpperCAmelCase__ ( self) ->Union[str, Any]:
a_ = self.full_loop()
a_ = torch.mean(torch.abs(__UpperCAmelCase))
assert abs(result_mean.item() - 0.2_464) < 1E-3
def UpperCAmelCase__ ( self) ->Any:
a_ = self.full_loop(prediction_type="v_prediction")
a_ = torch.mean(torch.abs(__UpperCAmelCase))
assert abs(result_mean.item() - 0.1_014) < 1E-3
def UpperCAmelCase__ ( self) ->Dict:
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config(thresholding=__UpperCAmelCase , dynamic_thresholding_ratio=0)
a_ = scheduler_class(**__UpperCAmelCase)
a_ = 10
a_ = self.dummy_model()
a_ = self.dummy_sample_deter.half()
scheduler.set_timesteps(__UpperCAmelCase)
for i, t in enumerate(scheduler.timesteps):
a_ = model(__UpperCAmelCase , __UpperCAmelCase)
a_ = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase).prev_sample
        assert sample.dtype == torch.float16
def UpperCAmelCase__ ( self , **__UpperCAmelCase) ->Optional[Any]:
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config(**__UpperCAmelCase)
a_ = scheduler_class(**__UpperCAmelCase)
scheduler.set_timesteps(scheduler.config.num_train_timesteps)
assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
| 303
|
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class snake_case :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=50 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=None , ) ->Dict:
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_mask
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = initializer_range
a_ = use_labels
a_ = scope
def UpperCAmelCase__ ( self) ->Any:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = None
if self.use_input_mask:
a_ = random_attention_mask([self.batch_size, self.seq_length])
if self.use_labels:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = self.get_config()
return config, input_ids, input_mask, token_labels
def UpperCAmelCase__ ( self) ->Optional[Any]:
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self) ->List[str]:
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->str:
a_ = BertGenerationEncoder(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase)
a_ = model(__UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->Union[str, Any]:
a_ = True
a_ = BertGenerationEncoder(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , )
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , ) ->List[str]:
a_ = True
a_ = True
a_ = BertGenerationDecoder(config=__UpperCAmelCase).to(__UpperCAmelCase).eval()
# first forward pass
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase , )
a_ = outputs.past_key_values
        # create hypothetical multiple next tokens and extend them to next_input_ids
a_ = ids_tensor((self.batch_size, 3) , config.vocab_size)
a_ = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append to next input_ids and
a_ = torch.cat([input_ids, next_tokens] , dim=-1)
a_ = torch.cat([input_mask, next_mask] , dim=-1)
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["hidden_states"][0]
a_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["hidden_states"][0]
# select random slice
a_ = ids_tensor((1,) , output_from_past.shape[-1]).item()
a_ = output_from_no_past[:, -3:, random_slice_idx].detach()
a_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , ) ->Tuple:
a_ = BertGenerationDecoder(__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase__ ( self) ->str:
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class snake_case ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
a_ : List[str] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
a_ : Optional[int] = (BertGenerationDecoder,) if is_torch_available() else ()
a_ : List[Any] = (
{"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder}
if is_torch_available()
else {}
)
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = BertGenerationEncoderTester(self)
a_ = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37)
def UpperCAmelCase__ ( self) ->Optional[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self) ->Tuple:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Tuple:
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config , input_ids , input_mask , token_labels)
def UpperCAmelCase__ ( self) ->int:
a_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[str]:
a_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Optional[int]:
# This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask , )
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*__UpperCAmelCase)
@slow
def UpperCAmelCase__ ( self) ->str:
a_ = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
self.assertIsNotNone(__UpperCAmelCase)
@require_torch
class snake_case ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self) ->int:
a_ = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
a_ = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]])
with torch.no_grad():
a_ = model(__UpperCAmelCase)[0]
a_ = torch.Size([1, 8, 10_24])
self.assertEqual(output.shape , __UpperCAmelCase)
a_ = torch.tensor(
[[[0.1_775, 0.0_083, -0.0_321], [1.6_002, 0.1_287, 0.3_912], [2.1_473, 0.5_791, 0.6_066]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4))
@require_torch
class snake_case ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self) ->List[str]:
a_ = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
a_ = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]])
with torch.no_grad():
a_ = model(__UpperCAmelCase)[0]
a_ = torch.Size([1, 8, 5_03_58])
self.assertEqual(output.shape , __UpperCAmelCase)
a_ = torch.tensor(
[[[-0.5_788, -2.5_994, -3.7_054], [0.0_438, 4.7_997, 1.8_795], [1.5_862, 6.6_409, 4.4_638]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4))
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)
def UpperCamelCase ( ) ->Dict:
"""simple docstring"""
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." )
    parser.add_argument("--file_path" , type=str , default="data/dump.txt" , help="The path to the data." )
    parser.add_argument("--tokenizer_type" , type=str , default="bert" , choices=["bert", "roberta", "gpt2"] )
    parser.add_argument("--tokenizer_name" , type=str , default="bert-base-uncased" , help="The tokenizer to use." )
    parser.add_argument("--dump_file" , type=str , default="data/dump" , help="The dump file prefix." )
    args = parser.parse_args()
logger.info(F'''Loading Tokenizer ({args.tokenizer_name})''' )
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["cls_token"] # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"] # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["cls_token"] # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"] # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["bos_token"] # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"] # `<|endoftext|>`
logger.info(F'''Loading text from {args.file_path}''' )
with open(args.file_path , "r" , encoding="utf8" ) as fp:
        data = fp.readlines()
logger.info("Start encoding" )
    logger.info(F'''{len(data )} examples to process.''' )
    rslt = []
    iter = 0
    interval = 10_000
    start = time.time()
    for text in data:
        text = F'''{bos} {text.strip()} {sep}'''
        token_ids = tokenizer.encode(text , add_special_tokens=False )
        rslt.append(token_ids )
        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(F'''{iter} examples processed. - {(end-start):.2f}s/{interval}expl''' )
            start = time.time()
logger.info("Finished binarization" )
    logger.info(F'''{len(data )} examples processed.''' )
    dp_file = F'''{args.dump_file}.{args.tokenizer_name}.pickle'''
    vocab_size = tokenizer.vocab_size
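    # token ids fit in 16-bit ints when the vocabulary is small enough, halving the dump size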
if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d ) for d in rslt]
    else:
        rslt_ = [np.int32(d ) for d in rslt]
    random.shuffle(rslt_ )
    logger.info(F'''Dump to {dp_file}''' )
    with open(dp_file , "wb" ) as handle:
        pickle.dump(rslt_ , handle , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
def UpperCamelCase ( UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
if "resnet-50" in model_name:
a_ = ResNetConfig.from_pretrained("microsoft/resnet-50" )
elif "resnet-101" in model_name:
a_ = ResNetConfig.from_pretrained("microsoft/resnet-101" )
else:
raise ValueError("Model name should include either resnet50 or resnet101" )
a_ = DetrConfig(use_timm_backbone=UpperCAmelCase , backbone_config=UpperCAmelCase )
# set label attributes
a_ = "panoptic" in model_name
if is_panoptic:
a_ = 250
else:
a_ = 91
a_ = "huggingface/label-files"
a_ = "coco-detection-id2label.json"
a_ = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type="dataset" ) , "r" ) )
a_ = {int(UpperCAmelCase ): v for k, v in idalabel.items()}
a_ = idalabel
a_ = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def UpperCamelCase ( UpperCAmelCase ) ->List[str]:
"""simple docstring"""
a_ = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") )
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") )
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") )
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") )
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
F'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
F'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
] )
return rename_keys
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
a_ = state_dict.pop(UpperCAmelCase )
a_ = val
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=False ) ->Optional[Any]:
"""simple docstring"""
a_ = ""
if is_panoptic:
a_ = "detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
a_ = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
a_ = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
a_ = in_proj_weight[:256, :]
a_ = in_proj_bias[:256]
a_ = in_proj_weight[256:512, :]
a_ = in_proj_bias[256:512]
a_ = in_proj_weight[-256:, :]
a_ = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
a_ = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
a_ = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
a_ = in_proj_weight[:256, :]
a_ = in_proj_bias[:256]
a_ = in_proj_weight[256:512, :]
a_ = in_proj_bias[256:512]
a_ = in_proj_weight[-256:, :]
a_ = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
a_ = state_dict.pop(
F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
a_ = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
a_ = in_proj_weight_cross_attn[:256, :]
a_ = in_proj_bias_cross_attn[:256]
a_ = in_proj_weight_cross_attn[256:512, :]
a_ = in_proj_bias_cross_attn[256:512]
a_ = in_proj_weight_cross_attn[-256:, :]
a_ = in_proj_bias_cross_attn[-256:]
def UpperCamelCase ( ) ->Dict:
"""simple docstring"""
a_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
a_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
return im
@torch.no_grad()
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=False ) ->List[str]:
"""simple docstring"""
a_ , a_ = get_detr_config(UpperCAmelCase )
# load original model from torch hub
a_ = {
"detr-resnet-50": "detr_resnet50",
"detr-resnet-101": "detr_resnet101",
}
logger.info(F'''Converting model {model_name}...''' )
a_ = torch.hub.load("facebookresearch/detr" , model_name_to_original_name[model_name] , pretrained=UpperCAmelCase ).eval()
a_ = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(UpperCAmelCase ):
if is_panoptic:
a_ = "detr." + src
rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(UpperCAmelCase , is_panoptic=UpperCAmelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
a_ = "detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
a_ = state_dict.pop(UpperCAmelCase )
a_ = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
a_ = state_dict.pop(UpperCAmelCase )
a_ = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
a_ = state_dict.pop(UpperCAmelCase )
a_ = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
a_ = state_dict.pop(UpperCAmelCase )
a_ = val
# finally, create HuggingFace model and load state dict
a_ = DetrForSegmentation(UpperCAmelCase ) if is_panoptic else DetrForObjectDetection(UpperCAmelCase )
model.load_state_dict(UpperCAmelCase )
model.eval()
# verify our conversion on an image
a_ = "coco_panoptic" if is_panoptic else "coco_detection"
a_ = DetrImageProcessor(format=UpperCAmelCase )
a_ = processor(images=prepare_img() , return_tensors="pt" )
a_ = encoding["pixel_values"]
a_ = detr(UpperCAmelCase )
a_ = model(UpperCAmelCase )
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase )
model.save_pretrained(UpperCAmelCase )
processor.save_pretrained(UpperCAmelCase )
if push_to_hub:
# Upload model and image processor to the hub
logger.info("Uploading PyTorch model and image processor to the hub..." )
model.push_to_hub(F'''nielsr/{model_name}''' )
processor.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
UpperCamelCase_ = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring"""
from __future__ import annotations
UpperCamelCase_ = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) ->tuple[list[list[int]], list[list[int]]]:
"""simple docstring"""
a_ = [
[0 for col in range(len(grid[0] ) )] for row in range(len(UpperCAmelCase ) )
] # the reference grid
a_ = 1
a_ = [
[0 for col in range(len(grid[0] ) )] for row in range(len(UpperCAmelCase ) )
] # the action grid
a_ = init[0]
a_ = init[1]
a_ = 0
a_ = g + heuristic[x][y] # cost from starting cell to destination cell
a_ = [[f, g, x, y]]
a_ = False # flag that is set when search is complete
a_ = False # flag set if we can't find expand
while not found and not resign:
if len(UpperCAmelCase ) == 0:
raise ValueError("Algorithm is unable to find solution" )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
a_ = cell.pop()
a_ = next_cell[2]
a_ = next_cell[3]
a_ = next_cell[1]
if x == goal[0] and y == goal[1]:
a_ = True
else:
for i in range(len(UpperCAmelCase ) ): # to try out different valid actions
a_ = x + DIRECTIONS[i][0]
a_ = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(UpperCAmelCase ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
a_ = g + cost
a_ = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
a_ = 1
a_ = i
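    # walk back from the goal using the recorded actions to rebuild the path in reverse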
a_ = []
a_ = goal[0]
a_ = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
a_ = x - DIRECTIONS[action[x][y]][0]
a_ = y - DIRECTIONS[action[x][y]][1]
a_ = xa
a_ = ya
invpath.append([x, y] )
a_ = []
for i in range(len(UpperCAmelCase ) ):
path.append(invpath[len(UpperCAmelCase ) - 1 - i] )
return path, action
if __name__ == "__main__":
UpperCamelCase_ = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
UpperCamelCase_ = [0, 0]
# all coordinates are given in format [y,x]
UpperCamelCase_ = [len(grid) - 1, len(grid[0]) - 1]
UpperCamelCase_ = 1
# the cost map which pushes the path closer to the goal
UpperCamelCase_ = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
UpperCamelCase_ = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
UpperCamelCase_ = 99
UpperCamelCase_ , UpperCamelCase_ = search(grid, init, goal, cost, heuristic)
print('ACTION MAP')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def UpperCamelCase ( UpperCAmelCase ) ->Tuple:
"""simple docstring"""
a_ = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
a_ = True if "large" in model_name or "huge" in model_name else False
a_ = True if "large" in model_name or "huge" in model_name else False
a_ = True if "large" in model_name or "huge" in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
a_ = [3, 3, 3, 3]
a_ = [5, 5, 5, 5]
elif "fl4" in model_name:
a_ = [4, 4, 4, 4]
a_ = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
a_ = [3, 3, 3, 3]
if "lrf" in model_name:
a_ = [3, 3, 3, 3]
else:
a_ = [2, 2, 2, 2]
if "tiny" in model_name:
a_ = 96
elif "small" in model_name:
a_ = 96
elif "base" in model_name:
a_ = 128
elif "large" in model_name:
a_ = 192
elif "xlarge" in model_name:
a_ = 256
elif "huge" in model_name:
a_ = 352
# set label information
a_ = "huggingface/label-files"
if "large" in model_name or "huge" in model_name:
a_ = "imagenet-22k-id2label.json"
else:
a_ = "imagenet-1k-id2label.json"
a_ = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type="dataset" ) , "r" ) )
a_ = {int(UpperCAmelCase ): v for k, v in idalabel.items()}
a_ = {v: k for k, v in idalabel.items()}
a_ = FocalNetConfig(
embed_dim=UpperCAmelCase , depths=UpperCAmelCase , focal_levels=UpperCAmelCase , focal_windows=UpperCAmelCase , use_conv_embed=UpperCAmelCase , idalabel=UpperCAmelCase , labelaid=UpperCAmelCase , use_post_layernorm=UpperCAmelCase , use_layerscale=UpperCAmelCase , )
return config
def UpperCamelCase ( UpperCAmelCase ) ->Any:
"""simple docstring"""
if "patch_embed.proj" in name:
a_ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
a_ = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
a_ = "encoder." + name
if "encoder.layers" in name:
a_ = name.replace("encoder.layers" , "encoder.stages" )
if "downsample.proj" in name:
a_ = name.replace("downsample.proj" , "downsample.projection" )
if "blocks" in name:
a_ = name.replace("blocks" , "layers" )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
a_ = name.replace("modulation.f" , "modulation.projection_in" )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
a_ = name.replace("modulation.h" , "modulation.projection_context" )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
a_ = name.replace("modulation.proj" , "modulation.projection_out" )
if name == "norm.weight":
a_ = "layernorm.weight"
if name == "norm.bias":
a_ = "layernorm.bias"
if "head" in name:
a_ = name.replace("head" , "classifier" )
else:
a_ = "focalnet." + name
return name
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) ->Dict:
"""simple docstring"""
a_ = {
"focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
}
# fmt: on
a_ = model_name_to_url[model_name]
print("Checkpoint URL: " , UpperCAmelCase )
a_ = torch.hub.load_state_dict_from_url(UpperCAmelCase , map_location="cpu" )["model"]
# rename keys
for key in state_dict.copy().keys():
a_ = state_dict.pop(UpperCAmelCase )
a_ = val
a_ = get_focalnet_config(UpperCAmelCase )
a_ = FocalNetForImageClassification(UpperCAmelCase )
model.eval()
# load state dict
model.load_state_dict(UpperCAmelCase )
# verify conversion
a_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
a_ = BitImageProcessor(
do_resize=UpperCAmelCase , size={"shortest_edge": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCAmelCase , crop_size=224 , do_normalize=UpperCAmelCase , image_mean=UpperCAmelCase , image_std=UpperCAmelCase , )
a_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
a_ = processor(images=UpperCAmelCase , return_tensors="pt" )
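    # build the equivalent torchvision pipeline to cross-check the processor's pixel_values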
a_ = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
a_ = image_transforms(UpperCAmelCase ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , UpperCAmelCase , atol=1E-4 )
a_ = model(**UpperCAmelCase )
a_ = outputs.logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
print("First values of logits:" , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
a_ = torch.tensor([0.2166, -0.4368, 0.2191] )
elif model_name == "focalnet-tiny-lrf":
a_ = torch.tensor([1.1669, 0.0125, -0.1695] )
elif model_name == "focalnet-small":
a_ = torch.tensor([0.4917, -0.0430, 0.1341] )
elif model_name == "focalnet-small-lrf":
a_ = torch.tensor([-0.2588, -0.5342, -0.2331] )
elif model_name == "focalnet-base":
a_ = torch.tensor([-0.1655, -0.4090, -0.1730] )
elif model_name == "focalnet-base-lrf":
a_ = torch.tensor([0.5306, -0.0483, -0.3928] )
assert torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCAmelCase )
processor.save_pretrained(UpperCAmelCase )
if push_to_hub:
print(F'''Pushing model and processor of {model_name} to the hub...''' )
model.push_to_hub(F'''{model_name}''' )
processor.push_to_hub(F'''{model_name}''' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
UpperCamelCase_ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
UpperCamelCase_ = None
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
UpperCamelCase_ = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
},
'tokenizer_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json',
},
}
UpperCamelCase_ = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
UpperCamelCase_ = '▁'
# Segments (not really needed)
UpperCamelCase_ = 0
UpperCamelCase_ = 1
UpperCamelCase_ = 2
UpperCamelCase_ = 3
UpperCamelCase_ = 4
class snake_case ( PreTrainedTokenizerFast ):
a_ : Optional[Any] = VOCAB_FILES_NAMES
a_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
a_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : List[str] = """left"""
a_ : Optional[Any] = XLNetTokenizer
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<sep>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<cls>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=["<eop>", "<eod>"] , **__UpperCAmelCase , ) ->List[str]:
# Mask token behave like a normal word, i.e. include the space before it
a_ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase) if isinstance(__UpperCAmelCase , __UpperCAmelCase) else mask_token
super().__init__(
vocab_file=__UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , **__UpperCAmelCase , )
a_ = 3
a_ = do_lower_case
a_ = remove_space
a_ = keep_accents
a_ = vocab_file
a_ = False if not self.vocab_file else True
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = None) ->List[int]:
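        # XLNet puts its special tokens at the end: A <sep> <cls> for a single sequence, A <sep> B <sep> <cls> for a pair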
a_ = [self.sep_token_id]
a_ = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = None) ->List[int]:
a_ = [self.sep_token_id]
a_ = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = None) ->Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer.")
if not os.path.isdir(__UpperCAmelCase):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
return
a_ = os.path.join(
__UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(__UpperCAmelCase):
copyfile(self.vocab_file , __UpperCAmelCase)
return (out_vocab_file,)
"""simple docstring"""
import os
import numpy
import onnx
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->List[str]:
"""simple docstring"""
a_ = a.name
a_ = b.name
a_ = ""
a_ = ""
a_ = a == b
a_ = name_a
a_ = name_b
return res
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(UpperCAmelCase , UpperCAmelCase )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase )
_graph_replace_input_with(node_proto.attribute[1].g , UpperCAmelCase , UpperCAmelCase )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Dict:
"""simple docstring"""
for n in graph_proto.node:
_node_replace_input_with(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
a_ = list(model.graph.initializer )
a_ = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
a_ = inits[i].name
a_ = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , UpperCAmelCase , UpperCAmelCase )
def UpperCamelCase ( UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
a_ = os.path.dirname(UpperCAmelCase )
a_ = os.path.basename(UpperCAmelCase )
a_ = onnx.load(os.path.join(UpperCAmelCase , UpperCAmelCase ) )
a_ = list(model.graph.initializer )
a_ = set()
a_ = {}
a_ = []
a_ = 0
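    # pairwise scan of the initializers: identical tensors are grouped so the later copies can be dropped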
for i in range(len(UpperCAmelCase ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(UpperCAmelCase ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(UpperCAmelCase )
dup_set.add(UpperCAmelCase )
a_ = inits[j].data_type
a_ = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print("unexpected data type: " , UpperCAmelCase )
total_reduced_size += mem_size
a_ = inits[i].name
a_ = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(UpperCAmelCase )
else:
a_ = [name_j]
ind_to_replace.append((j, i) )
print("total reduced size: " , total_reduced_size / 1_024 / 1_024 / 1_024 , "GB" )
a_ = sorted(UpperCAmelCase )
_remove_dup_initializers_from_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
a_ = "optimized_" + model_file_name
a_ = os.path.join(UpperCAmelCase , UpperCAmelCase )
onnx.save(UpperCAmelCase , UpperCAmelCase )
return new_model
"""simple docstring"""
import argparse
import os
import re
import packaging.version
UpperCamelCase_ = 'examples/'
UpperCamelCase_ = {
'examples': (re.compile(R'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(R'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(R'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), R'\1version="VERSION",'),
'doc': (re.compile(R'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
UpperCamelCase_ = {
'init': 'src/diffusers/__init__.py',
'setup': 'setup.py',
}
UpperCamelCase_ = 'README.md'
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
with open(UpperCAmelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
a_ = f.read()
a_ , a_ = REPLACE_PATTERNS[pattern]
a_ = replace.replace("VERSION" , UpperCAmelCase )
a_ = re_pattern.sub(UpperCAmelCase , UpperCAmelCase )
with open(UpperCAmelCase , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(UpperCAmelCase )
def UpperCamelCase ( UpperCAmelCase ) ->int:
"""simple docstring"""
for folder, directories, fnames in os.walk(UpperCAmelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("research_projects" )
if "legacy" in directories:
directories.remove("legacy" )
for fname in fnames:
if fname.endswith(".py" ):
update_version_in_file(os.path.join(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase , pattern="examples" )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=False ) ->int:
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
if not patch:
update_version_in_examples(UpperCAmelCase )
def UpperCamelCase ( ) ->int:
"""simple docstring"""
a_ = "🤗 Transformers currently provides the following architectures"
a_ = "1. Want to contribute a new model?"
with open(UpperCAmelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
a_ = f.readlines()
# Find the start of the list.
a_ = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
a_ = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("1." ):
a_ = lines[index].replace(
"https://huggingface.co/docs/diffusers/main/model_doc" , "https://huggingface.co/docs/diffusers/model_doc" , )
index += 1
with open(UpperCAmelCase , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(UpperCAmelCase )
def UpperCamelCase ( ) ->List[Any]:
"""simple docstring"""
with open(REPLACE_FILES["init"] , "r" ) as f:
a_ = f.read()
a_ = REPLACE_PATTERNS["init"][0].search(UpperCAmelCase ).groups()[0]
return packaging.version.parse(UpperCAmelCase )
def UpperCamelCase ( UpperCAmelCase=False ) ->List[Any]:
"""simple docstring"""
a_ = get_version()
if patch and default_version.is_devrelease:
raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" )
if default_version.is_devrelease:
a_ = default_version.base_version
elif patch:
a_ = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
a_ = F'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
a_ = input(F'''Which version are you releasing? [{default_version}]''' )
if len(UpperCAmelCase ) == 0:
a_ = default_version
print(F'''Updating version to {version}.''' )
global_version_update(UpperCAmelCase , patch=UpperCAmelCase )
def UpperCamelCase ( ) ->int:
"""simple docstring"""
a_ = get_version()
a_ = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
a_ = current_version.base_version
# Check with the user we got that right.
a_ = input(F'''Which version are we developing now? [{dev_version}]''' )
if len(UpperCAmelCase ) == 0:
a_ = dev_version
print(F'''Updating version to {version}.''' )
global_version_update(UpperCAmelCase )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
UpperCamelCase_ = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class snake_case ( BaseTokenizer ):
def __init__( self , __UpperCAmelCase = "▁" , __UpperCAmelCase = True , __UpperCAmelCase = "<unk>" , __UpperCAmelCase = "</s>" , __UpperCAmelCase = "<pad>" , ) ->str:
a_ = {
"pad": {"id": 0, "token": pad_token},
"eos": {"id": 1, "token": eos_token},
"unk": {"id": 2, "token": unk_token},
}
a_ = [None] * len(self.special_tokens)
for token_dict in self.special_tokens.values():
a_ = token_dict["token"]
a_ = Tokenizer(Unigram())
a_ = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(" {2,}") , " "),
normalizers.Lowercase(),
])
a_ = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase),
pre_tokenizers.Digits(individual_digits=__UpperCAmelCase),
pre_tokenizers.Punctuation(),
])
a_ = decoders.Metaspace(replacement=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase)
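        # the post-processor appends the EOS token to every encoded sequence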
a_ = TemplateProcessing(
single=F'''$A {self.special_tokens["eos"]["token"]}''' , special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])] , )
a_ = {
"model": "SentencePieceUnigram",
"replacement": replacement,
"add_prefix_space": add_prefix_space,
}
super().__init__(__UpperCAmelCase , __UpperCAmelCase)
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = 80_00 , __UpperCAmelCase = True , ) ->Optional[Any]:
a_ = trainers.UnigramTrainer(
vocab_size=__UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=__UpperCAmelCase , )
if isinstance(__UpperCAmelCase , __UpperCAmelCase):
a_ = [files]
self._tokenizer.train(__UpperCAmelCase , trainer=__UpperCAmelCase)
self.add_unk_id()
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = 80_00 , __UpperCAmelCase = True , ) ->int:
a_ = trainers.UnigramTrainer(
vocab_size=__UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=__UpperCAmelCase , )
self._tokenizer.train_from_iterator(__UpperCAmelCase , trainer=__UpperCAmelCase)
self.add_unk_id()
def UpperCAmelCase__ ( self) ->Union[str, Any]:
a_ = json.loads(self._tokenizer.to_str())
a_ = self.special_tokens["unk"]["id"]
a_ = Tokenizer.from_str(json.dumps(__UpperCAmelCase))
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase_ = {
'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ['ConvNextFeatureExtractor']
UpperCamelCase_ = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
"""simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def test_hf_hub_url(repo_id , path , revision ):
    """simple docstring"""
    url = hf_hub_url(repo_id=repo_id , path=path , revision=revision )
    assert url == F'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(path )}'''
"""simple docstring"""
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
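# By linearity of expectation, the expected number of distinct colours in a draw of `taken` balls
# is NUM_COLOURS * (1 - C(NUM_BALLS - BALLS_PER_COLOUR, taken) / C(NUM_BALLS, taken)).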
def solution(taken = 20 ) ->str:
    """simple docstring"""
    total = math.comb(NUM_BALLS , taken )
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR , taken )
    result = NUM_COLOURS * (1 - missing_colour / total)
    return F'''{result:.9f}'''
if __name__ == "__main__":
print(solution(20))
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class snake_case ( PretrainedConfig ):
a_ : Tuple = """audio-spectrogram-transformer"""
def __init__( self , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=16 , __UpperCAmelCase=True , __UpperCAmelCase=10 , __UpperCAmelCase=10 , __UpperCAmelCase=10_24 , __UpperCAmelCase=1_28 , **__UpperCAmelCase , ) ->str:
super().__init__(**__UpperCAmelCase)
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = initializer_range
a_ = layer_norm_eps
a_ = patch_size
a_ = qkv_bias
a_ = frequency_stride
a_ = time_stride
a_ = max_length
a_ = num_mel_bins
"""simple docstring"""
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ) , """Tatoeba directory does not exist.""" )
class snake_case ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self) ->int:
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)
@slow
def UpperCAmelCase__ ( self) ->Any:
self.resolver.convert_models(["heb-eng"])
@slow
def UpperCAmelCase__ ( self) ->str:
        content , mmeta = self.resolver.write_model_card("opus-mt-he-en" , dry_run=True)
assert mmeta["long_pair"] == "heb-eng"
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
),
}
class snake_case ( PretrainedConfig ):
a_ : str = """xlm-roberta"""
def __init__( self , __UpperCAmelCase=3_05_22 , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase="absolute" , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , ) ->Union[str, Any]:
super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase)
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = hidden_act
a_ = intermediate_size
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = initializer_range
a_ = layer_norm_eps
a_ = position_embedding_type
a_ = use_cache
a_ = classifier_dropout
class snake_case ( OnnxConfig ):
@property
def UpperCAmelCase__ ( self) ->Mapping[str, Mapping[int, str]]:
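        # multiple-choice inputs carry an extra choice dimension between batch and sequence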
if self.task == "multiple-choice":
a_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
a_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
])
"""simple docstring"""
def triangle_number_generator():
"""simple docstring"""
for n in range(1 , 1_000_000 ):
yield n * (n + 1) // 2
def count_divisors(n ) ->int:
"""simple docstring"""
a_ = 1
a_ = 2
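    # divisor count from the prime factorisation: the product of (multiplicity + 1) over all prime factors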
while i * i <= n:
a_ = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
def solution() ->int:
    """simple docstring"""
    return next(i for i in triangle_number_generator() if count_divisors(i ) > 500 )
if __name__ == "__main__":
print(solution())
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=24 , __UpperCAmelCase=2 , __UpperCAmelCase=6 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=None , __UpperCAmelCase=10_00 , ) ->List[str]:
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_mask
a_ = use_token_type_ids
a_ = use_labels
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = type_sequence_label_size
a_ = initializer_range
a_ = num_labels
a_ = scope
a_ = range_bbox
def UpperCAmelCase__ ( self) ->int:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox)
# Ensure that bbox is legal
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
a_ = bbox[i, j, 3]
a_ = bbox[i, j, 1]
a_ = t
if bbox[i, j, 2] < bbox[i, j, 0]:
a_ = bbox[i, j, 2]
a_ = bbox[i, j, 0]
a_ = t
a_ = None
if self.use_input_mask:
a_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
a_ = None
if self.use_token_type_ids:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a_ = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCAmelCase__ ( self) ->List[str]:
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Any:
a_ = LiltModel(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase)
a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase , token_type_ids=__UpperCAmelCase)
a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Union[str, Any]:
a_ = self.num_labels
a_ = LiltForTokenClassification(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(
__UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Dict:
a_ = LiltForQuestionAnswering(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(
__UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase__ ( self) ->str:
a_ = self.prepare_config_and_inputs()
a_ , a_ , a_ , a_ , a_ , a_ , a_ = config_and_inputs
a_ = {
"input_ids": input_ids,
"bbox": bbox,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : List[Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
a_ : List[str] = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ : Any = False
a_ : Dict = False
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->int:
return True
def UpperCAmelCase__ ( self) ->str:
a_ = LiltModelTester(self)
a_ = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37)
def UpperCAmelCase__ ( self) ->List[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self) ->Tuple:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Dict:
a_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a_ = type
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[str]:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->str:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase)
@slow
def UpperCAmelCase__ ( self) ->List[Any]:
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = LiltModel.from_pretrained(__UpperCAmelCase)
self.assertIsNotNone(__UpperCAmelCase)
@require_torch
@slow
class snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(__UpperCAmelCase)
a_ = torch.tensor([[1, 2]] , device=__UpperCAmelCase)
a_ = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__UpperCAmelCase)
# forward pass
with torch.no_grad():
a_ = model(input_ids=__UpperCAmelCase , bbox=__UpperCAmelCase)
a_ = torch.Size([1, 2, 7_68])
a_ = torch.tensor(
[[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]] , device=__UpperCAmelCase , )
self.assertTrue(outputs.last_hidden_state.shape , __UpperCAmelCase)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __UpperCAmelCase , atol=1E-3))
| 303
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase_ = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ['DeiTFeatureExtractor']
UpperCamelCase_ = ['DeiTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 303
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def UpperCAmelCase__ ( self) ->Any:
a_ = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(__UpperCAmelCase , "embed_dim"))
self.parent.assertTrue(hasattr(__UpperCAmelCase , "num_heads"))
class snake_case :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=64 , __UpperCAmelCase=3 , __UpperCAmelCase=[16, 48, 96] , __UpperCAmelCase=[1, 3, 6] , __UpperCAmelCase=[1, 2, 10] , __UpperCAmelCase=[7, 3, 3] , __UpperCAmelCase=[4, 2, 2] , __UpperCAmelCase=[2, 1, 1] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[False, False, True] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=2 , ) ->Optional[int]:
a_ = parent
a_ = batch_size
a_ = image_size
a_ = patch_sizes
a_ = patch_stride
a_ = patch_padding
a_ = is_training
a_ = use_labels
a_ = num_labels
a_ = num_channels
a_ = embed_dim
a_ = num_heads
a_ = stride_kv
a_ = depth
a_ = cls_token
a_ = attention_drop_rate
a_ = initializer_range
a_ = layer_norm_eps
def UpperCAmelCase__ ( self) ->Any:
a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a_ = None
if self.use_labels:
# create a random int32 tensor of given shape
a_ = ids_tensor([self.batch_size] , self.num_labels)
a_ = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self) ->Union[str, Any]:
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Optional[Any]:
a_ = TFCvtModel(config=__UpperCAmelCase)
a_ = model(__UpperCAmelCase , training=__UpperCAmelCase)
a_ = (self.image_size, self.image_size)
a_ , a_ = image_size[0], image_size[1]
for i in range(len(self.depth)):
a_ = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
a_ = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->str:
a_ = self.num_labels
a_ = TFCvtForImageClassification(__UpperCAmelCase)
a_ = model(__UpperCAmelCase , labels=__UpperCAmelCase , training=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase__ ( self) ->Tuple:
a_ = self.prepare_config_and_inputs()
a_ , a_ , a_ = config_and_inputs
a_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : Union[str, Any] = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
a_ : List[Any] = (
{"""feature-extraction""": TFCvtModel, """image-classification""": TFCvtForImageClassification}
if is_tf_available()
else {}
)
a_ : Any = False
a_ : Dict = False
a_ : Optional[int] = False
a_ : List[Any] = False
a_ : List[Any] = False
def UpperCAmelCase__ ( self) ->List[str]:
a_ = TFCvtModelTester(self)
a_ = TFCvtConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37)
def UpperCAmelCase__ ( self) ->List[str]:
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="Cvt does not output attentions")
def UpperCAmelCase__ ( self) ->Dict:
pass
@unittest.skip(reason="Cvt does not use inputs_embeds")
def UpperCAmelCase__ ( self) ->List[str]:
pass
@unittest.skip(reason="Cvt does not support input and output embeddings")
def UpperCAmelCase__ ( self) ->Optional[Any]:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
def UpperCAmelCase__ ( self) ->Dict:
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def UpperCAmelCase__ ( self) ->List[str]:
super().test_keras_fit()
@unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8")
def UpperCAmelCase__ ( self) ->Dict:
a_ = tf.keras.mixed_precision.Policy("mixed_float16")
tf.keras.mixed_precision.set_global_policy(__UpperCAmelCase)
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("float32")
def UpperCAmelCase__ ( self) ->Optional[int]:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(__UpperCAmelCase)
a_ = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ = [*signature.parameters.keys()]
a_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Optional[int]:
def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase):
a_ = model_class(__UpperCAmelCase)
a_ = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase))
a_ = outputs.hidden_states
a_ = len(self.model_tester.depth)
self.assertEqual(len(__UpperCAmelCase) , __UpperCAmelCase)
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:]) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a_ = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Dict:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[str]:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase)
@slow
def UpperCAmelCase__ ( self) ->str:
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = TFCvtModel.from_pretrained(__UpperCAmelCase)
self.assertIsNotNone(__UpperCAmelCase)
def UpperCamelCase ( ) ->Dict:
"""simple docstring"""
a_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class snake_case ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self) ->int:
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
@slow
def UpperCAmelCase__ ( self) ->Any:
a_ = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
a_ = self.default_image_processor
a_ = prepare_img()
a_ = image_processor(images=__UpperCAmelCase , return_tensors="tf")
# forward pass
a_ = model(**__UpperCAmelCase)
# verify the logits
a_ = tf.TensorShape((1, 10_00))
self.assertEqual(outputs.logits.shape , __UpperCAmelCase)
a_ = tf.constant([0.9_285, 0.9_015, -0.3_150])
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __UpperCAmelCase , atol=1E-4))
| 303
| 1
|
"""simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
a_ = hf_hub_url(repo_id=UpperCAmelCase , path=UpperCAmelCase , revision=UpperCAmelCase )
assert url == F'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(UpperCAmelCase )}'''
| 303
|
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : Dict = """Speech2TextFeatureExtractor"""
a_ : str = """Speech2TextTokenizer"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase) ->List[str]:
super().__init__(__UpperCAmelCase , __UpperCAmelCase)
a_ = self.feature_extractor
a_ = False
def __call__( self , *__UpperCAmelCase , **__UpperCAmelCase) ->Optional[int]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__UpperCAmelCase , **__UpperCAmelCase)
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
a_ = kwargs.pop("raw_speech")
else:
a_ = kwargs.pop("audio" , __UpperCAmelCase)
a_ = kwargs.pop("sampling_rate" , __UpperCAmelCase)
a_ = kwargs.pop("text" , __UpperCAmelCase)
if len(__UpperCAmelCase) > 0:
a_ = args[0]
a_ = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process.")
if audio is not None:
a_ = self.feature_extractor(__UpperCAmelCase , *__UpperCAmelCase , sampling_rate=__UpperCAmelCase , **__UpperCAmelCase)
if text is not None:
a_ = self.tokenizer(__UpperCAmelCase , **__UpperCAmelCase)
if text is None:
return inputs
elif audio is None:
return encodings
else:
a_ = encodings["input_ids"]
return inputs
def UpperCAmelCase__ ( self , *__UpperCAmelCase , **__UpperCAmelCase) ->str:
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase)
def UpperCAmelCase__ ( self , *__UpperCAmelCase , **__UpperCAmelCase) ->int:
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase)
@contextmanager
def UpperCAmelCase__ ( self) ->Tuple:
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call.")
a_ = True
a_ = self.tokenizer
yield
a_ = self.feature_extractor
a_ = False
| 303
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase_ = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ['LayoutLMv3TokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv3ForQuestionAnswering',
'LayoutLMv3ForSequenceClassification',
'LayoutLMv3ForTokenClassification',
'LayoutLMv3Model',
'LayoutLMv3PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLayoutLMv3ForQuestionAnswering',
'TFLayoutLMv3ForSequenceClassification',
'TFLayoutLMv3ForTokenClassification',
'TFLayoutLMv3Model',
'TFLayoutLMv3PreTrainedModel',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ['LayoutLMv3FeatureExtractor']
UpperCamelCase_ = ['LayoutLMv3ImageProcessor']
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 303
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {
'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
'LiltForQuestionAnswering',
'LiltForSequenceClassification',
'LiltForTokenClassification',
'LiltModel',
'LiltPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 303
| 1
|
"""simple docstring"""
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
class snake_case :
a_ : str
a_ : str = None
@staticmethod
def UpperCAmelCase__ ( ) ->Any:
raise NotImplementedError
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase) ->Any:
raise NotImplementedError
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Union[str, Any]:
raise NotImplementedError
def UpperCAmelCase__ ( self) ->List[Any]:
if not self.is_available():
raise RuntimeError(
F'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''')
@classmethod
def UpperCAmelCase__ ( cls) ->str:
return F'''`pip install {cls.pip_package or cls.name}`'''
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : Dict = """optuna"""
@staticmethod
def UpperCAmelCase__ ( ) ->Dict:
return is_optuna_available()
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase) ->Optional[Any]:
return run_hp_search_optuna(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase)
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->List[Any]:
return default_hp_space_optuna(__UpperCAmelCase)
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : str = """ray"""
a_ : Tuple = """'ray[tune]'"""
@staticmethod
def UpperCAmelCase__ ( ) ->Any:
return is_ray_available()
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase) ->Optional[Any]:
return run_hp_search_ray(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase)
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->List[str]:
return default_hp_space_ray(__UpperCAmelCase)
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : Tuple = """sigopt"""
@staticmethod
def UpperCAmelCase__ ( ) ->str:
return is_sigopt_available()
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase) ->Optional[int]:
return run_hp_search_sigopt(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase)
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Tuple:
return default_hp_space_sigopt(__UpperCAmelCase)
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : Union[str, Any] = """wandb"""
@staticmethod
def UpperCAmelCase__ ( ) ->Any:
return is_wandb_available()
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase) ->str:
return run_hp_search_wandb(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase)
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->int:
return default_hp_space_wandb(__UpperCAmelCase)
UpperCamelCase_ = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def UpperCamelCase ( ) ->str:
"""simple docstring"""
a_ = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(UpperCAmelCase ) > 0:
a_ = available_backends[0].name
if len(UpperCAmelCase ) > 1:
logger.info(
F'''{len(UpperCAmelCase )} hyperparameter search backends available. Using {name} as the default.''' )
return name
raise RuntimeError(
"No hyperparameter search backend available.\n"
+ "\n".join(
F''' - To install {backend.name} run {backend.pip_install()}'''
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
| 303
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 303
| 1
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
UpperCamelCase_ = False
class snake_case ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self) ->Union[str, Any]:
a_ = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
pipe.to(__UpperCAmelCase)
pipe.set_progress_bar_config(disable=__UpperCAmelCase)
a_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg")
a_ = torch.manual_seed(0)
a_ = pipe(
image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
a_ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
a_ = np.array([0.0_441, 0.0_469, 0.0_507, 0.0_575, 0.0_632, 0.0_650, 0.0_865, 0.0_909, 0.0_945])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
| 303
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCamelCase_ = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 303
| 1
|