code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
'''simple docstring'''
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCamelCase = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
__UpperCamelCase = 5
__UpperCamelCase = 10
@require_sentencepiece
@require_tokenizers
class _A ( __lowercase , unittest.TestCase ):
lowercase__: Optional[Any] = SpeechaTextTokenizer
lowercase__: Optional[int] = False
lowercase__: Union[str, Any] = True
def lowercase__ ( self : List[Any] ) -> str:
"""simple docstring"""
super().setUp()
__snake_case : Tuple = sp.SentencePieceProcessor()
spm_model.Load(__magic_name__ )
__snake_case : int = ["""<s>""", """<pad>""", """</s>""", """<unk>"""]
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(__magic_name__ ) )]
__snake_case : int = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
__snake_case : Union[str, Any] = Path(self.tmpdirname )
save_json(__magic_name__ , save_dir / VOCAB_FILES_NAMES["""vocab_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(__magic_name__ , save_dir / VOCAB_FILES_NAMES["""spm_file"""] )
__snake_case : Dict = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self : Dict ) -> int:
"""simple docstring"""
__snake_case : Any = """<pad>"""
__snake_case : Optional[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ )
def lowercase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
__snake_case : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """j""" )
self.assertEqual(len(__magic_name__ ) , 10_01 )
def lowercase__ ( self : List[str] ) -> int:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 10_01 )
def lowercase__ ( self : str ) -> Tuple:
"""simple docstring"""
__snake_case : Union[str, Any] = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
__snake_case : List[Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__magic_name__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__magic_name__ ) , [2_89, 50, 14, 1_74, 3_86] , )
__snake_case : Optional[int] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__magic_name__ , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""] , )
__snake_case : Union[str, Any] = tokenizer.convert_tokens_to_ids(__magic_name__ )
self.assertListEqual(__magic_name__ , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
__snake_case : List[Any] = tokenizer.convert_ids_to_tokens(__magic_name__ )
self.assertListEqual(
__magic_name__ , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""] , )
@slow
def lowercase__ ( self : int ) -> Tuple:
"""simple docstring"""
__snake_case : List[Any] = {"""input_ids""": [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__magic_name__ , model_name="""facebook/s2t-small-mustc-en-de-st""" , revision="""a14f04cf0776c02f62a8cb800cf7909e15ea23ad""" , )
# NOTE(review): identifiers in this block look machine-mangled — every attribute
# and method is named `lowercase__`, locals are bound to `__snake_case`, and the
# bodies reference names (`cls.checkpoint_name`, `self.tokenizer`,
# `__magic_name__`, `generated_ids`, FR_CODE, ES_CODE) that are never defined
# here. The docstrings below record the apparent intent only; the code as
# written will not run until the original identifiers are restored upstream.
@require_sentencepiece
class _A ( unittest.TestCase ):
    # Apparent intent: checkpoint name, French input text, and the expected
    # Spanish text for a multilingual speech-to-text tokenizer test. All three
    # assignments share one name, so only the last one survives at class
    # creation — TODO confirm the original field names against upstream.
    lowercase__: List[Any] = '''valhalla/s2t_mustc_multilinguial_medium'''
    lowercase__: Union[str, Any] = '''C\'est trop cool'''
    lowercase__: Union[str, Any] = '''Esto es genial'''

    @classmethod
    def lowercase__ ( cls : Optional[Any] ) -> int:
        """Class-level fixture: load the pretrained tokenizer once for all tests.

        NOTE(review): the tokenizer is bound to a throwaway local and
        ``cls.checkpoint_name`` is never defined on this class — presumably the
        original stored it as ``cls.tokenizer``; verify against upstream.
        """
        __snake_case : SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
        return cls

    def lowercase__ ( self : List[Any] ) -> Optional[int]:
        """Check the hard-coded language-code-to-id mapping of the tokenizer."""
        self.assertEqual(self.tokenizer.lang_code_to_id["""pt"""] , 4 )
        self.assertEqual(self.tokenizer.lang_code_to_id["""ru"""] , 6 )
        self.assertEqual(self.tokenizer.lang_code_to_id["""it"""] , 9 )
        self.assertEqual(self.tokenizer.lang_code_to_id["""de"""] , 11 )

    def lowercase__ ( self : Optional[int] ) -> Dict:
        """The multilingual checkpoint is expected to have a 10_000-entry vocab."""
        self.assertEqual(self.tokenizer.vocab_size , 1_00_00 )

    def lowercase__ ( self : List[Any] ) -> Optional[Any]:
        """Decoding should ignore special tokens, including the language-code prefix.

        NOTE(review): `__magic_name__` and `generated_ids` are unresolved in this
        copy; in the apparent original, decoding the full sequence and the
        sequence without its leading language code yield the same text.
        """
        self.assertIn(__magic_name__ , self.tokenizer.all_special_ids )
        __snake_case : Tuple = [ES_CODE, 4, 16_01, 47, 76_47, 2]
        __snake_case : Any = self.tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ )
        __snake_case : int = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__magic_name__ )
        self.assertEqual(__magic_name__ , __magic_name__ )
        self.assertNotIn(self.tokenizer.eos_token , __magic_name__ )

    def lowercase__ ( self : Union[str, Any] ) -> Any:
        """Setting the target language should prefix encodings with that language id."""
        __snake_case : Optional[Any] = """fr"""
        __snake_case : int = self.tokenizer(self.french_text ).input_ids
        self.assertEqual(encoded[0] , __magic_name__ )
        self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )

    def lowercase__ ( self : Dict ) -> Union[str, Any]:
        """Switching the target language back and forth should update prefix_tokens."""
        __snake_case : Dict = """fr"""
        self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
        __snake_case : Any = """es"""
        self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
| 26 |
import numpy as np
def lowerCAmelCase_(vector: np.ndarray, alpha: float) -> np.ndarray:
    """
    Exponential Linear Unit (ELU) activation, applied elementwise.

    Returns ``vector`` where it is positive and ``alpha * (exp(vector) - 1)``
    elsewhere.

    :param vector: input array of any shape.
    :param alpha: scale of the negative (saturating) branch; >= 0 for the
        standard ELU.
    :return: array with the same shape as ``vector``.

    >>> lowerCAmelCase_(np.array([2.3, 0.6, -2, -3.8]), 0.3)
    array([ 2.3       ,  0.6       , -0.25939942, -0.29328877])
    """
    # Bugfix: the original declared two parameters with the same name (a
    # SyntaxError) and referenced undefined `vector`/`alpha` in the body.
    # np.where keeps positive entries unchanged and applies the scaled
    # exponential decay only to the non-positive ones.
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 556 | 0 |
'''simple docstring'''
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class __lowercase :
    """Config/inputs factory for the TF TransfoXL model tests.

    NOTE(review): this block is machine-mangled — every local assignment targets
    the same name ``lowerCamelCase_``, so the instance attributes read later
    (``self.batch_size``, ``self.mem_len``, ``self.seed`` ...) are never actually
    set and the code as written cannot run. Comments record apparent intent only.
    """

    def __init__(self , A , ):
        # Apparent intent (TODO confirm against upstream): store the parent test
        # case and hyperparameters — batch_size=13, seq_length=7, mem_len=30,
        # key_length=seq_length+mem_len, clamp_len=15, is_training/use_labels
        # flags, vocab_size=99, cutoffs=[10, 50, 80], hidden_size=32, d_embed=32,
        # num_attention_heads=4, d_head=8, d_inner=128, div_val=2,
        # num_hidden_layers=2, seed=1(?), eos_token_id=0, num_labels=3,
        # pad_token_id=vocab_size-1, init_range=0.01.
        lowerCamelCase_ : int = parent
        lowerCamelCase_ : Dict = 1_3
        lowerCamelCase_ : Any = 7
        lowerCamelCase_ : Dict = 3_0
        lowerCamelCase_ : Optional[Any] = self.seq_length + self.mem_len
        lowerCamelCase_ : Tuple = 1_5
        lowerCamelCase_ : Tuple = True
        lowerCamelCase_ : List[Any] = True
        lowerCamelCase_ : Dict = 9_9
        lowerCamelCase_ : Any = [1_0, 5_0, 8_0]
        lowerCamelCase_ : List[str] = 3_2
        lowerCamelCase_ : Tuple = 3_2
        lowerCamelCase_ : Optional[Any] = 4
        lowerCamelCase_ : Union[str, Any] = 8
        lowerCamelCase_ : Dict = 1_2_8
        lowerCamelCase_ : Dict = 2
        lowerCamelCase_ : Tuple = 2
        lowerCamelCase_ : Any = None
        lowerCamelCase_ : Optional[Any] = 1
        lowerCamelCase_ : Union[str, Any] = 0
        lowerCamelCase_ : str = 3
        lowerCamelCase_ : Dict = self.vocab_size - 1
        lowerCamelCase_ : Dict = 0.01

    def UpperCAmelCase__ (self ):
        """Build a tiny TransfoXLConfig plus two random input-id tensors and LM labels."""
        lowerCamelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCamelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCamelCase_ : Union[str, Any] = None
        if self.use_labels:
            lowerCamelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCamelCase_ : str = TransfoXLConfig(
            vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
        return (config, input_ids_a, input_ids_a, lm_labels)

    def UpperCAmelCase__ (self ):
        """Seed the Python and TF RNGs so weight init / sampling is reproducible."""
        random.seed(self.seed )
        tf.random.set_seed(self.seed )

    def UpperCAmelCase__ (self , A , A , A , A ):
        """Run TFTransfoXLModel twice (fresh mems, then fed-back mems) and check output shapes."""
        lowerCamelCase_ : int = TFTransfoXLModel(A )
        lowerCamelCase_ : Tuple = model(A ).to_tuple()
        lowerCamelCase_ : Union[str, Any] = {'''input_ids''': input_ids_a, '''mems''': mems_a}
        lowerCamelCase_ : Any = model(A ).to_tuple()
        self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )

    def UpperCAmelCase__ (self , A , A , A , A ):
        """Same memory round-trip check for the LM-head model, plus logits-shape checks with labels."""
        lowerCamelCase_ : List[Any] = TFTransfoXLLMHeadModel(A )
        lowerCamelCase_ : List[Any] = model(A ).to_tuple()
        lowerCamelCase_ : Optional[int] = {'''input_ids''': input_ids_a, '''labels''': lm_labels}
        lowerCamelCase_ : Union[str, Any] = model(A ).to_tuple()
        lowerCamelCase_ : Tuple = model([input_ids_a, mems_a] ).to_tuple()
        lowerCamelCase_ : List[str] = {'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels}
        lowerCamelCase_ : str = model(A ).to_tuple()
        self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
        self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )

    def UpperCAmelCase__ (self , A , A , A , A ):
        """Sequence-classification head: logits should have shape (batch_size, num_labels)."""
        lowerCamelCase_ : Dict = TFTransfoXLForSequenceClassification(A )
        lowerCamelCase_ : Optional[int] = model(A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def UpperCAmelCase__ (self ):
        """Adapt prepare_config_and_inputs() output to the common (config, inputs_dict) form."""
        lowerCamelCase_ : Tuple = self.prepare_config_and_inputs()
        (lowerCamelCase_) : List[Any] = config_and_inputs
        lowerCamelCase_ : Tuple = {'''input_ids''': input_ids_a}
        return config, inputs_dict
@require_tf
class __lowercase ( _lowercase , _lowercase , unittest.TestCase ):
lowerCamelCase : Any = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
lowerCamelCase : Tuple = () if is_tf_available() else ()
lowerCamelCase : Any = (
{
"feature-extraction": TFTransfoXLModel,
"text-classification": TFTransfoXLForSequenceClassification,
"text-generation": TFTransfoXLLMHeadModel,
"zero-shot": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
lowerCamelCase : str = False
lowerCamelCase : Optional[int] = False
lowerCamelCase : Dict = False
lowerCamelCase : Optional[int] = False
def UpperCAmelCase__ (self , A , A , A , A , A ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = TFTransfoXLModelTester(self )
lowerCamelCase_ : Optional[Any] = ConfigTester(self , config_class=A , d_embed=3_7 )
def UpperCAmelCase__ (self ):
self.config_tester.run_common_tests()
def UpperCAmelCase__ (self ):
self.model_tester.set_seed()
lowerCamelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*A )
def UpperCAmelCase__ (self ):
self.model_tester.set_seed()
lowerCamelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ : List[Any] = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
lowerCamelCase_ : Optional[Any] = model_class(A )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
lowerCamelCase_ : str = model.get_output_embeddings()
assert isinstance(A , tf.keras.layers.Layer )
lowerCamelCase_ : int = model.get_bias()
assert name is None
else:
lowerCamelCase_ : Tuple = model.get_output_embeddings()
assert x is None
lowerCamelCase_ : int = model.get_bias()
assert name is None
def UpperCAmelCase__ (self ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def UpperCAmelCase__ (self ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ : Optional[Any] = TFTransfoXLModel.from_pretrained(A )
self.assertIsNotNone(A )
@unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' )
def UpperCAmelCase__ (self ):
pass
@require_tf
class __lowercase ( unittest.TestCase ):
@unittest.skip('''Skip test until #12651 is resolved.''' )
@slow
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' )
# fmt: off
lowerCamelCase_ : Optional[Any] = tf.convert_to_tensor([[3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
lowerCamelCase_ : Union[str, Any] = [3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0,3_3,1,1_8_5_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_8,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
lowerCamelCase_ : Optional[Any] = model.generate(A , max_length=2_0_0 , do_sample=A )
self.assertListEqual(output_ids[0].numpy().tolist() , A )
| 708 |
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__lowercase : Union[str, Any] = '''platform'''
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def lowercase_(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
) -> dict:
    """
    Build the standard encoder/decoder input dict for the BlenderbotSmall tests.

    Any mask left as ``None`` is derived from the corresponding ids — padding
    positions (``config.pad_token_id``) get 0, real tokens get 1 — and head
    masks default to all-ones.

    Bugfix: the original declared all eight parameters with the same mangled
    name (a SyntaxError) and referenced undefined locals in the body.

    NOTE: mirroring the upstream helper, the returned "decoder_attention_mask"
    entry is the *encoder* attention mask, and the computed head masks are not
    part of the returned dict.
    """
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }


# Backward-compatible alias: the tester class below calls this helper by its
# descriptive upstream name.
prepare_blenderbot_inputs_dict = lowercase_
class __lowercase :
def __init__(self , A , A=1_3 , A=7 , A=True , A=False , A=9_9 , A=1_6 , A=2 , A=4 , A=4 , A="gelu" , A=0.1 , A=0.1 , A=3_2 , A=2 , A=1 , A=0 , A=0.02 , ):
lowerCamelCase_ : int = parent
lowerCamelCase_ : Optional[Any] = batch_size
lowerCamelCase_ : Union[str, Any] = seq_length
lowerCamelCase_ : List[str] = is_training
lowerCamelCase_ : Optional[Any] = use_labels
lowerCamelCase_ : Tuple = vocab_size
lowerCamelCase_ : Optional[int] = hidden_size
lowerCamelCase_ : str = num_hidden_layers
lowerCamelCase_ : Tuple = num_attention_heads
lowerCamelCase_ : Union[str, Any] = intermediate_size
lowerCamelCase_ : Union[str, Any] = hidden_act
lowerCamelCase_ : Tuple = hidden_dropout_prob
lowerCamelCase_ : List[str] = attention_probs_dropout_prob
lowerCamelCase_ : Any = max_position_embeddings
lowerCamelCase_ : Union[str, Any] = eos_token_id
lowerCamelCase_ : Union[str, Any] = pad_token_id
lowerCamelCase_ : str = bos_token_id
lowerCamelCase_ : int = initializer_range
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
lowerCamelCase_ : List[Any] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
lowerCamelCase_ : Any = shift_tokens_right(A , 1 , 2 )
lowerCamelCase_ : Any = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=A , )
lowerCamelCase_ : List[str] = prepare_blenderbot_inputs_dict(A , A , A )
return config, inputs_dict
def UpperCAmelCase__ (self ):
lowerCamelCase_, lowerCamelCase_ : Dict = self.prepare_config_and_inputs()
return config, inputs_dict
def UpperCAmelCase__ (self , A , A , A ):
lowerCamelCase_ : Any = 2_0
lowerCamelCase_ : Dict = model_class_name(A )
lowerCamelCase_ : List[str] = model.encode(inputs_dict['''input_ids'''] )
lowerCamelCase_, lowerCamelCase_ : Tuple = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
lowerCamelCase_ : Any = model.init_cache(decoder_input_ids.shape[0] , A , A )
lowerCamelCase_ : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
lowerCamelCase_ : Tuple = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCamelCase_ : List[Any] = model.decode(
decoder_input_ids[:, :-1] , A , decoder_attention_mask=A , past_key_values=A , decoder_position_ids=A , )
lowerCamelCase_ : str = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
lowerCamelCase_ : Union[str, Any] = model.decode(
decoder_input_ids[:, -1:] , A , decoder_attention_mask=A , past_key_values=outputs_cache.past_key_values , decoder_position_ids=A , )
lowerCamelCase_ : str = model.decode(A , A )
lowerCamelCase_ : List[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
def UpperCAmelCase__ (self , A , A , A ):
lowerCamelCase_ : Tuple = 2_0
lowerCamelCase_ : Dict = model_class_name(A )
lowerCamelCase_ : Optional[Any] = model.encode(inputs_dict['''input_ids'''] )
lowerCamelCase_, lowerCamelCase_ : Optional[int] = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
lowerCamelCase_ : List[str] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
lowerCamelCase_ : Any = model.init_cache(decoder_input_ids.shape[0] , A , A )
lowerCamelCase_ : Any = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCamelCase_ : List[Any] = model.decode(
decoder_input_ids[:, :-1] , A , decoder_attention_mask=A , past_key_values=A , decoder_position_ids=A , )
lowerCamelCase_ : List[str] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
lowerCamelCase_ : str = model.decode(
decoder_input_ids[:, -1:] , A , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=A , decoder_position_ids=A , )
lowerCamelCase_ : Union[str, Any] = model.decode(A , A , decoder_attention_mask=A )
lowerCamelCase_ : Optional[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
# NOTE(review): locals in this block are machine-mangled (every assignment
# targets ``lowerCamelCase_``), so names read later (``input_ids``, ``config``,
# ``lm_model``, ``outputs``, ``shifted`` ...) are unbound and the methods cannot
# run as written. Docstrings below record the apparent intent only.
@require_flax
class __lowercase ( unittest.TestCase ):
    # Apparent intent: vocab_size shared by the tiny configs built below.
    lowerCamelCase : Optional[int] = 99

    def UpperCAmelCase__ (self ):
        """Return (config, input_ids, batch_size) for a tiny BlenderbotSmall model."""
        lowerCamelCase_ : Union[str, Any] = np.array(
            [
                [7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
                [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
                [5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
                [7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
                [8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
                [5_5, 1_3, 1_6, 5_8, 5, 2, 1],  # note padding
                [6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
                [5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
                [4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
                [2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
                [2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
                [4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
                [7_0, 7_0, 5_0, 9, 2_8, 0, 2],
            ] , dtype=np.intaa , )
        lowerCamelCase_ : Optional[int] = input_ids.shape[0]
        lowerCamelCase_ : int = BlenderbotSmallConfig(
            vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
        return config, input_ids, batch_size

    def UpperCAmelCase__ (self ):
        """LM-head forward pass: logits shape must be (batch, seq_len, vocab_size)."""
        lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ : Any = self._get_config_and_data()
        lowerCamelCase_ : List[str] = FlaxBlenderbotSmallForConditionalGeneration(A )
        lowerCamelCase_ : Optional[Any] = lm_model(input_ids=A )
        lowerCamelCase_ : List[Any] = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs['''logits'''].shape , A )

    def UpperCAmelCase__ (self ):
        """LM head with encoder and decoder inputs of different lengths: logits follow the decoder length."""
        lowerCamelCase_ : Any = BlenderbotSmallConfig(
            vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
        lowerCamelCase_ : Union[str, Any] = FlaxBlenderbotSmallForConditionalGeneration(A )
        lowerCamelCase_ : List[Any] = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa )
        lowerCamelCase_ : str = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa )
        lowerCamelCase_ : Optional[int] = lm_model(input_ids=A , decoder_input_ids=A )
        lowerCamelCase_ : List[str] = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs['''logits'''].shape , A )

    def UpperCAmelCase__ (self ):
        """shift_tokens_right: shape preserved, pad count drops by one, decoder-start token (2) in column 0."""
        lowerCamelCase_ : Tuple = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa )
        lowerCamelCase_ : Dict = shift_tokens_right(A , 1 , 2 )
        lowerCamelCase_ : Union[str, Any] = np.equal(A , 1 ).astype(np.floataa ).sum()
        lowerCamelCase_ : List[str] = np.equal(A , 1 ).astype(np.floataa ).sum()
        self.assertEqual(shifted.shape , input_ids.shape )
        self.assertEqual(A , n_pad_before - 1 )
        self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class __lowercase ( _lowercase , unittest.TestCase , _lowercase ):
    # NOTE(review): obfuscation damage in this class, to be repaired against upstream:
    #  * both mixin bases are the placeholder `_lowercase` (undefined here);
    #  * the three class attributes below all bind `lowerCamelCase`, so the first two
    #    (presumably `is_encoder_decoder` and `all_model_classes`) are shadowed;
    #  * every test method is named `UpperCAmelCase__`, so only the last definition
    #    survives and none match unittest's `test_*` discovery pattern;
    #  * `A` is an undefined placeholder standing in for the real local variables.
    lowerCamelCase : Dict = True
    lowerCamelCase : str = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    lowerCamelCase : List[Any] = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()

    def UpperCAmelCase__ (self ):
        # setUp-style hook: builds the shared model tester.
        lowerCamelCase_ : List[Any] = FlaxBlenderbotSmallModelTester(self )

    def UpperCAmelCase__ (self ):
        # Exercises the use-cache forward path for every model class.
        lowerCamelCase_, lowerCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(A , A , A )

    def UpperCAmelCase__ (self ):
        # Same as above but with an explicit attention mask.
        lowerCamelCase_, lowerCamelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(A , A , A )

    def UpperCAmelCase__ (self ):
        # Checks jitted vs non-jitted `encode` produce identically shaped outputs.
        lowerCamelCase_, lowerCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                lowerCamelCase_ : Optional[int] = self._prepare_for_class(A , A )
                lowerCamelCase_ : Any = model_class(A )

                # NOTE(review): duplicate parameter names `A` make this inner def a SyntaxError.
                @jax.jit
                def encode_jitted(A , A=None , **A ):
                    return model.encode(input_ids=A , attention_mask=A )

                with self.subTest('''JIT Enabled''' ):
                    lowerCamelCase_ : int = encode_jitted(**A ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        lowerCamelCase_ : Union[str, Any] = encode_jitted(**A ).to_tuple()
                self.assertEqual(len(A ) , len(A ) )
                for jitted_output, output in zip(A , A ):
                    self.assertEqual(jitted_output.shape , output.shape )

    def UpperCAmelCase__ (self ):
        # Checks jitted vs non-jitted `decode` produce identically shaped outputs.
        lowerCamelCase_, lowerCamelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                lowerCamelCase_ : Any = model_class(A )
                lowerCamelCase_ : Union[str, Any] = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
                lowerCamelCase_ : Union[str, Any] = {
                    '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
                    '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
                    '''encoder_outputs''': encoder_outputs,
                }

                @jax.jit
                def decode_jitted(A , A , A ):
                    return model.decode(
                        decoder_input_ids=A , decoder_attention_mask=A , encoder_outputs=A , )

                with self.subTest('''JIT Enabled''' ):
                    lowerCamelCase_ : Tuple = decode_jitted(**A ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        lowerCamelCase_ : Optional[Any] = decode_jitted(**A ).to_tuple()
                self.assertEqual(len(A ) , len(A ) )
                for jitted_output, output in zip(A , A ):
                    self.assertEqual(jitted_output.shape , output.shape )

    @slow
    def UpperCAmelCase__ (self ):
        # Smoke test: load the published 90M checkpoint and run a single-eos-token input.
        for model_class_name in self.all_model_classes:
            lowerCamelCase_ : Union[str, Any] = model_class_name.from_pretrained('''facebook/blenderbot_small-90M''' )
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            lowerCamelCase_ : Optional[Any] = np.ones((1, 1) ) * model.config.eos_token_id
            lowerCamelCase_ : Optional[int] = model(A )
            self.assertIsNotNone(A )
| 357 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger. NOTE(review): previously both this and the archive map below were bound
# to the same obfuscated name `__SCREAMING_SNAKE_CASE`, so the logger was immediately shadowed.
logger = logging.get_logger(__name__)

# Maps published checkpoint names to their hosted config.json URLs.
SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class __snake_case ( PretrainedConfig ):
    """Configuration for a Speech2Text2 decoder (``model_type="speech_to_text_2"``).

    Stores decoder hyper-parameters and special-token ids; all values are persisted by
    the ``PretrainedConfig`` machinery.

    NOTE(review): the obfuscated version (a) inherited from the undefined placeholder
    ``_SCREAMING_SNAKE_CASE`` instead of the ``PretrainedConfig`` imported above,
    (b) bound all three class attributes to one name so ``model_type``/``attribute_map``
    were lost, and (c) repeated one parameter name throughout ``__init__`` (SyntaxError)
    while dropping every ``self.<attr>`` target.
    """

    # These exact attribute names are read by PretrainedConfig / the generation utilities.
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10_000,
        decoder_layers=6,
        decoder_ffn_dim=2_048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1_024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 388 | '''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline  # noqa: F401

# Emit a one-time deprecation warning (no hard failure) steering users to the new
# top-level import path; `standard_warn=False` keeps the custom message verbatim and
# `stacklevel=3` attributes the warning to the caller's import site.
deprecate(
    """stable diffusion controlnet""",
    """0.22.0""",
    """Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.""",
    standard_warn=False,
    stacklevel=3,
)
| 251 | 0 |
def dfs(u, graph, visited_edge, path=None):
    """Depth-first walk that consumes each undirected edge once; returns the walk.

    NOTE(review): renamed from the obfuscated ``__snake_case`` (all four functions in
    this module shared that name) so the recursive call and ``check_euler`` resolve;
    the original signature also repeated one parameter name four times (SyntaxError).
    """
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            # Mark the edge in both directions so it is never traversed twice.
            # (The obfuscated version assigned these flags to throwaway names.)
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path
def check_circuit_or_path(graph, max_node):
    """Classify the graph by odd-degree vertex count: 1 = Euler circuit, 2 = Euler path, 3 = neither.

    Returns ``(check, odd_node)`` where ``odd_node`` is the last odd-degree vertex seen
    (-1 if none) — the start vertex when an Euler path exists.

    NOTE(review): renamed from the obfuscated ``__snake_case`` so ``check_euler`` can
    call it; the original duplicated one parameter name (SyntaxError).
    """
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node
def check_euler(graph, max_node):
    """Print whether the graph has an Euler cycle/path and, if one exists, print the walk.

    NOTE(review): renamed from the obfuscated ``__snake_case`` so ``main`` can call it;
    duplicate parameter names and lost assignment targets (``check``, ``start_node``,
    ``path``) restored from the references in the body.
    """
    # visited_edge[u][v] marks whether undirected edge (u, v) has been walked.
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node  # an Euler path must start at an odd-degree vertex
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)
def main():
    """Run ``check_euler`` over a handful of sample graphs (demo driver)."""
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
| 721 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class a__ ( AbstractDatasetReader ):
    """Reader that loads plain-text files into a ``Dataset`` (or ``IterableDataset`` when streaming).

    NOTE(review): the obfuscated version inherited from the undefined placeholder
    ``_snake_case`` instead of the ``AbstractDatasetReader`` imported above, repeated
    one parameter name throughout ``__init__`` (SyntaxError), and name-mangled the
    reader method; parameter/method names restored from the body's references.
    """

    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # Normalize to {split_name: paths} so the builder always receives a mapping.
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        """Materialize the dataset: streaming -> IterableDataset, else download/prepare then load."""
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
| 314 | 0 |
'''simple docstring'''
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract start/end timestamps and duration in whole minutes from one GitHub Actions job payload.

    NOTE(review): renamed from the obfuscated ``lowercase__`` so the dict-comprehension
    call sites in ``get_job_time`` resolve; the original body dropped the dict-entry
    assignment targets entirely, returning an empty dict.
    """
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]
    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min
    return job_info
def get_job_time(workflow_run_id, token=None):
    """Fetch all jobs of a workflow run from the GitHub API and map job name -> timing info.

    Paginates at 100 jobs per page; on any error, logs the traceback and returns {}.

    NOTE(review): renamed from the obfuscated ``lowercase__`` (duplicate parameter
    names were a SyntaxError) so the driver's ``get_job_time`` call resolves.
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        # First request returned up to 100 jobs; fetch the remaining pages (page numbering is 1-based).
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}
return {}
if __name__ == "__main__":
    # CLI driver: print per-job durations for one workflow run, slowest first.
    # NOTE(review): the obfuscated version bound every result to `SCREAMING_SNAKE_CASE__`
    # while the following lines referenced `parser`/`args`/`job_time`, all undefined.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f'{k}: {v["duration"]}')
| 301 |
'''simple docstring'''
import argparse
import copy
def generate_neighbours(path):
    """Parse a whitespace-separated edge file (``u v weight`` per line) into an adjacency map.

    Returns ``{node: [[neighbour, weight_str], ...]}`` with each undirected edge recorded
    in both directions.

    NOTE(review): renamed from the obfuscated ``lowercase__`` so ``main`` resolves; the
    original body lost the ``dict_of_neighbours``/``_list`` assignment targets and thus
    raised NameError on the first line of input.
    """
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )
    return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    """Greedy nearest-neighbour tour starting at the first character of the data file.

    Returns ``(first_solution, distance_of_first_solution)`` where the tour closes back
    on the start node.

    NOTE(review): renamed from the obfuscated ``lowercase__`` (duplicate parameter names
    were a SyntaxError) and local names (``start_node``, ``visiting``, ``best_node`` …)
    restored from the body's references.
    """
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000  # sentinel "infinite" edge weight
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    # The last hop added the 10000 sentinel; replace it with the real closing edge weight.
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    """Generate all 2-swap neighbours of ``solution`` (endpoints fixed), cheapest last element first.

    Each neighbour is the swapped tour with its total distance appended as the final
    list element; the result is sorted ascending by that distance.

    NOTE(review): renamed from the obfuscated ``lowercase__`` (duplicate parameter names
    were a SyntaxError); swap indices and the sort key lambda restored from references.
    """
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Tabu-search improvement loop over 2-swap neighbourhoods.

    Runs ``iters`` iterations; a FIFO tabu list of at most ``size`` swaps forbids
    reversing recent moves. Returns ``(best_solution_ever, best_cost)``.

    NOTE(review): renamed from the obfuscated ``lowercase__`` (five identical parameter
    names were a SyntaxError); all local names restored from the body's references.
    """
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1  # distance is the appended last element

        found = False
        while not found:
            # Identify the swapped pair that produced this neighbour.
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if (
                [first_exchange_node, second_exchange_node] not in tabu_list
                and [second_exchange_node, first_exchange_node] not in tabu_list
            ):
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]  # strip the appended distance
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                # Move is tabu: fall through to the next-best neighbour.
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost
def main(args=None):
    """Wire the pipeline together: parse the data file, build a greedy tour, improve it with tabu search.

    NOTE(review): renamed from the obfuscated ``lowercase__`` so the CLI block below
    resolves; the called helpers likewise carry their restored names.
    """
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
    # CLI entry point: -f data file, -i iteration count, -s tabu-list size.
    # NOTE(review): the parser was previously bound to `SCREAMING_SNAKE_CASE__` while
    # every following line referenced the undefined name `parser`.
    parser = argparse.ArgumentParser(description='Tabu Search')
    parser.add_argument(
        '-f',
        '--File',
        type=str,
        help='Path to the file containing the data',
        required=True,
    )
    parser.add_argument(
        '-i',
        '--Iterations',
        type=int,
        help='How many iterations the algorithm should perform',
        required=True,
    )
    parser.add_argument(
        '-s', '--Size', type=int, help='Size of the tabu list', required=True
    )

    # Pass the arguments to main method
    main(parser.parse_args())
| 301 | 1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Maps submodule name -> public names it exports; consumed by _LazyModule below.
# NOTE(review): previously every assignment in this file was bound to the same
# obfuscated name `a_`, so the modeling entries were never added to this structure
# and the _LazyModule instance was never installed into sys.modules.
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # The model classes require torch; only advertise them when it is installed.
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_upernet import UperNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so the heavy imports happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 250 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
# Module logger. NOTE(review): was bound to the obfuscated name `a_`, but the
# conversation/pipeline code below calls `logger.warning(...)` — restore the name.
logger = logging.get_logger(__name__)
class lowercase:
    """Holds the state of one dialogue (originally named ``Conversation``; obfuscated
    class name kept since the pipeline below references it positionally).

    Tracks past user inputs, generated responses, and the pending (unprocessed) user
    input, keyed by a UUID.

    NOTE(review): the obfuscated version repeated one parameter name in ``__init__``
    (SyntaxError), named every method ``lowercase__`` (so only the last survived), and
    called the non-existent ``uuid.uuida()``; names restored from the call sites in
    the pipeline class below.
    """

    def __init__(self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text

    def __eq__(self, other):
        if not isinstance(other, lowercase):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        """Stage a new user input; refuses (or overwrites, if asked) when one is already pending."""
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
                    f"""with: \"{text}\".""" )
                self.new_user_input = text
            else:
                logger.warning(
                    f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
                    f"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" )
        else:
            self.new_user_input = text

    def mark_processed(self):
        """Move the pending input into history."""
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        """Record a model-generated response."""
        self.generated_responses.append(response)

    def iter_texts(self):
        """Yield ``(is_user, text)`` pairs in chronological order, pending input last."""
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"""Conversation id: {self.uuid} \n"""
        for is_user, text in self.iter_texts():
            name = '''user''' if is_user else '''bot'''
            output += f"""{name} >> {text} \n"""
        return output
# NOTE(review): obfuscation damage in this class, to be repaired against upstream:
#  * the decorator's first argument and the base class are the undefined placeholder
#    `_UpperCAmelCase` (presumably PIPELINE_INIT_ARGS and Pipeline, both imported above);
#  * this class is also named `lowercase`, shadowing the conversation-state class above;
#  * several method signatures repeat the parameter name `_lowercase` (SyntaxError), and
#    method bodies reference locals (`min_length_for_response`, `generate_kwargs`,
#    `model_inputs`, `conversation`, …) whose assignment targets were replaced by
#    `SCREAMING_SNAKE_CASE__` throw-away bindings.
@add_end_docstrings(
    _UpperCAmelCase , r'''
    min_length_for_response (`int`, *optional*, defaults to 32):
        The minimum length (in number of tokens) for a response.
    minimum_tokens (`int`, *optional*, defaults to 10):
        The minimum length of tokens to leave for a response.
''' , )
class lowercase ( _UpperCAmelCase ):
    def __init__( self : str , *_lowercase : List[Any] , **_lowercase : Union[str, Any] ):
        super().__init__(*_lowercase , **_lowercase )
        # Fall back to EOS as pad token when the tokenizer defines none.
        if self.tokenizer.pad_token_id is None:
            SCREAMING_SNAKE_CASE__ : int = self.tokenizer.eos_token

    # _sanitize_parameters: split user kwargs into preprocess / forward / postprocess dicts.
    def lowercase__ ( self : List[Any] , _lowercase : Union[str, Any]=None , _lowercase : Dict=None , _lowercase : Tuple=None , **_lowercase : List[Any] ):
        SCREAMING_SNAKE_CASE__ : Optional[Any] = {}
        SCREAMING_SNAKE_CASE__ : List[Any] = {}
        SCREAMING_SNAKE_CASE__ : Tuple = {}
        if min_length_for_response is not None:
            SCREAMING_SNAKE_CASE__ : List[str] = min_length_for_response
        if minimum_tokens is not None:
            SCREAMING_SNAKE_CASE__ : Optional[Any] = minimum_tokens
        if "max_length" in generate_kwargs:
            SCREAMING_SNAKE_CASE__ : List[Any] = generate_kwargs['''max_length''']
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            SCREAMING_SNAKE_CASE__ : str = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(_lowercase )
        return preprocess_params, forward_params, postprocess_params

    def __call__( self : Union[str, Any] , _lowercase : Union[Conversation, List[Conversation]] , _lowercase : Dict=0 , **_lowercase : Optional[int] ):
        # Unwrap single-conversation results for convenience.
        SCREAMING_SNAKE_CASE__ : Tuple = super().__call__(_lowercase , num_workers=_lowercase , **_lowercase )
        if isinstance(_lowercase , _lowercase ) and len(_lowercase ) == 1:
            return outputs[0]
        return outputs

    # preprocess: tokenize the conversation into model input ids (pt or tf tensors).
    def lowercase__ ( self : str , _lowercase : Conversation , _lowercase : Optional[int]=32 ):
        if not isinstance(_lowercase , _lowercase ):
            raise ValueError('''ConversationalPipeline, expects Conversation as inputs''' )
        if conversation.new_user_input is None:
            raise ValueError(
                f"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """
                '''Add user inputs with the conversation\'s `add_user_input` method''' )
        if hasattr(self.tokenizer , '''_build_conversation_input_ids''' ):
            SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer._build_conversation_input_ids(_lowercase )
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            SCREAMING_SNAKE_CASE__ : Optional[int] = self._legacy_parse_and_tokenize(_lowercase )
        if self.framework == "pt":
            SCREAMING_SNAKE_CASE__ : List[Any] = torch.LongTensor([input_ids] )
        elif self.framework == "tf":
            SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.constant([input_ids] )
        return {"input_ids": input_ids, "conversation": conversation}

    # _forward: trim the prompt so at least `minimum_tokens` remain for generation, then generate.
    def lowercase__ ( self : int , _lowercase : Optional[int] , _lowercase : Dict=10 , **_lowercase : Any ):
        SCREAMING_SNAKE_CASE__ : List[str] = generate_kwargs.get('''max_length''' , self.model.config.max_length )
        SCREAMING_SNAKE_CASE__ : Any = model_inputs['''input_ids'''].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""" )
            SCREAMING_SNAKE_CASE__ : Optional[int] = max_length - minimum_tokens
            SCREAMING_SNAKE_CASE__ : Tuple = model_inputs['''input_ids'''][:, -trim:]
            if "attention_mask" in model_inputs:
                SCREAMING_SNAKE_CASE__ : Optional[Any] = model_inputs['''attention_mask'''][:, -trim:]
        SCREAMING_SNAKE_CASE__ : Dict = model_inputs.pop('''conversation''' )
        SCREAMING_SNAKE_CASE__ : Any = max_length
        SCREAMING_SNAKE_CASE__ : Tuple = self.model.generate(**_lowercase , **_lowercase )
        if self.model.config.is_encoder_decoder:
            SCREAMING_SNAKE_CASE__ : List[str] = 1
        else:
            SCREAMING_SNAKE_CASE__ : List[str] = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    # postprocess: decode generated ids and append the answer to the conversation state.
    def lowercase__ ( self : Union[str, Any] , _lowercase : List[str] , _lowercase : Dict=True ):
        SCREAMING_SNAKE_CASE__ : Optional[Any] = model_outputs['''output_ids''']
        SCREAMING_SNAKE_CASE__ : List[str] = self.tokenizer.decode(
            output_ids[0] , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , )
        SCREAMING_SNAKE_CASE__ : Optional[int] = model_outputs['''conversation''']
        conversation.mark_processed()
        conversation.append_response(_lowercase )
        return conversation

    # _legacy_parse_and_tokenize: fallback flattening of a conversation into token ids.
    def lowercase__ ( self : Any , _lowercase : Conversation ):
        SCREAMING_SNAKE_CASE__ : int = self.tokenizer.eos_token_id
        SCREAMING_SNAKE_CASE__ : int = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) + [eos_token_id] )
            else:
                input_ids.extend(self.tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) )
        # Truncate from the left: keep the most recent tokens within the model window.
        if len(_lowercase ) > self.tokenizer.model_max_length:
            SCREAMING_SNAKE_CASE__ : Optional[int] = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
| 250 | 1 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
# Module-level RNG shared by `floats_list` when no explicit rng is passed.
# NOTE(review): was bound to an obfuscated name while the helper below references `global_rng`.
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a nested list of random floats with the given ``(batch, length)`` shape.

    NOTE(review): renamed from the obfuscated ``__lowerCamelCase``; the original
    signature repeated one parameter name four times (SyntaxError) and the body's
    references (``shape``, ``rng``, ``values``) matched no parameter.
    """
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    """Holds the hyper-parameters and synthetic-input helpers shared by the feature-extraction tests.

    NOTE(review): renamed from the obfuscated ``snake_case_`` — the test class below
    instantiates ``WhisperFeatureExtractionTester`` and calls ``prepare_feat_extract_dict``,
    neither of which resolved. The original ``__init__`` repeated one parameter name
    eleven times (SyntaxError); defaults mapped to attributes by position.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Even step so a batch spans lengths from min_seq_length up to max_seq_length.
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        """Return the kwargs used to construct the feature extractor under test."""
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """Build a batch of synthetic float waveforms (python lists, or np arrays when ``numpify``)."""

        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class snake_case_ ( _lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_: str = WhisperFeatureExtractor if is_speech_available() else None
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = WhisperFeatureExtractionTester(self )
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ = feat_extract_first.save_pretrained(lowerCamelCase_ )[0]
check_json_file_has_correct_format(lowerCamelCase_ )
A__ = self.feature_extraction_class.from_pretrained(lowerCamelCase_ )
A__ = feat_extract_first.to_dict()
A__ = feat_extract_second.to_dict()
A__ = feat_extract_first.mel_filters
A__ = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ ) )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ = os.path.join(lowerCamelCase_ , 'feat_extract.json' )
feat_extract_first.to_json_file(lowerCamelCase_ )
A__ = self.feature_extraction_class.from_json_file(lowerCamelCase_ )
A__ = feat_extract_first.to_dict()
A__ = feat_extract_second.to_dict()
A__ = feat_extract_first.mel_filters
A__ = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ ) )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
A__ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
A__ = [np.asarray(lowerCamelCase_ ) for speech_input in speech_inputs]
# Test feature size
A__ = feature_extractor(lowerCamelCase_ , padding='max_length' , return_tensors='np' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
A__ = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
A__ = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) )
# Test batched
A__ = feature_extractor(lowerCamelCase_ , return_tensors='np' ).input_features
A__ = feature_extractor(lowerCamelCase_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
A__ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
A__ = np.asarray(lowerCamelCase_ )
A__ = feature_extractor(lowerCamelCase_ , return_tensors='np' ).input_features
A__ = feature_extractor(lowerCamelCase_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) )
# Test truncation required
A__ = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
A__ = [np.asarray(lowerCamelCase_ ) for speech_input in speech_inputs]
A__ = [x[: feature_extractor.n_samples] for x in speech_inputs]
A__ = [np.asarray(lowerCamelCase_ ) for speech_input in speech_inputs_truncated]
A__ = feature_extractor(lowerCamelCase_ , return_tensors='np' ).input_features
A__ = feature_extractor(lowerCamelCase_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) )
def _UpperCAmelCase ( self ):
"""simple docstring"""
import torch
A__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A__ = np.random.rand(100 , 32 ).astype(np.floataa )
A__ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
A__ = feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
A__ = feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def _UpperCAmelCase ( self , __a ):
"""simple docstring"""
A__ = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
A__ = ds.sort('id' ).select(range(lowerCamelCase_ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def _UpperCAmelCase ( self ):
"""simple docstring"""
A__ = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
A__ = self._load_datasamples(1 )
A__ = WhisperFeatureExtractor()
A__ = feature_extractor(lowerCamelCase_ , return_tensors='pt' ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , lowerCamelCase_ , atol=1E-4 ) )
def _UpperCAmelCase ( self ):
    """zero_mean_unit_var_norm must yield ~zero mean and ~unit variance even for
    audio rescaled to a large range."""
    # NOTE(review): locals are mangled to `A__` (`feat_extract`, `audio`, and the
    # normalized output referenced later as `lowerCamelCase_` are undefined);
    # restore the pristine names before running.
    A__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
    A__ = self._load_datasamples(1 )[0]
    A__ = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue
    A__ = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=lowerCamelCase_ )[0]
    self.assertTrue(np.all(np.mean(lowerCamelCase_ ) < 1E-3 ) )
    self.assertTrue(np.all(np.abs(np.var(lowerCamelCase_ ) - 1 ) < 1E-3 ) )
| 260 |
"""simple docstring"""
import random
class Onepad:
    """Toy one-time-pad-style cipher.

    Fixes: the class was mangled to ``lowerCamelCase__`` while the module's
    ``__main__`` block instantiates ``Onepad``; both static methods shared one
    mangled name (so ``decrypt`` overwrote ``encrypt``), and every local was
    bound to the same placeholder.
    """

    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt each character with its own random key k: c = (ord(ch) + k) * k."""
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            encrypted = (i + k) * k
            cipher.append(encrypted)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Invert encrypt: ord(ch) = (c - k**2) / k."""
        plain = []
        for i in range(len(cipher)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)
if __name__ == "__main__":
    # Demo: encrypt a sample string, show cipher/key, and round-trip it.
    # Fixed the mangled tuple assignment (`UpperCAmelCase , UpperCAmelCase`)
    # so the names `c` and `k` used below are actually bound.
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
| 617 | 0 |
def merge_sort(collection: list) -> list:
    """Return a new sorted list using merge sort (stable, O(n log n)).

    Renamed from the mangled ``__UpperCAmelCase`` to match the ``merge_sort``
    call in the ``__main__`` block; the inner merge no longer uses
    ``list.pop(0)`` (O(n) per pop) and instead merges with two indices.

    >>> merge_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> merge_sort([])
    []
    >>> merge_sort([-2, -5, -45])
    [-45, -5, -2]
    """
    def merge(left: list, right: list) -> list:
        # Standard two-pointer merge of two sorted lists; `<=` keeps stability.
        result = []
        i = j = 0
        while i < len(left) and j < len(right):
            if left[i] <= right[j]:
                result.append(left[i])
                i += 1
            else:
                result.append(right[j])
                j += 1
        result.extend(left[i:])
        result.extend(right[j:])
        return result

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fixed mangled bindings: both `user_input` and `unsorted` were assigned
    # to the placeholder `A_` and therefore undefined where used.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
| 706 | import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
A_ = {
"tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
"tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
"base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
"base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
"small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
"small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
"medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
"medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
"large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
"large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}
def remove_ignore_keys_(state_dict):
    """Drop top-level bookkeeping entries from `state_dict` in place.

    Renamed from the mangled ``__UpperCAmelCase`` (the conversion entry point
    calls ``remove_ignore_keys_``); the parameter and the ``ignore_keys`` local
    were also mangled away. ``pop(k, None)`` tolerates already-missing keys.
    """
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
# Substring rename table: fairseq/OpenAI checkpoint key fragment -> HF key
# fragment. Renamed from the mangled `A_`: `rename_keys` iterates
# `WHISPER_MAPPING.items()`.
WHISPER_MAPPING = {
    "blocks": "layers",
    "mlp.0": "fc1",
    "mlp.2": "fc2",
    "mlp_ln": "final_layer_norm",
    ".attn.query": ".self_attn.q_proj",
    ".attn.key": ".self_attn.k_proj",
    ".attn.value": ".self_attn.v_proj",
    ".attn_ln": ".self_attn_layer_norm",
    ".attn.out": ".self_attn.out_proj",
    ".cross_attn.query": ".encoder_attn.q_proj",
    ".cross_attn.key": ".encoder_attn.k_proj",
    ".cross_attn.value": ".encoder_attn.v_proj",
    ".cross_attn_ln": ".encoder_attn_layer_norm",
    ".cross_attn.out": ".encoder_attn.out_proj",
    "decoder.ln.": "decoder.layer_norm.",
    "encoder.ln.": "encoder.layer_norm.",
    "token_embedding": "embed_tokens",
    "encoder.positional_embedding": "encoder.embed_positions.weight",
    "decoder.positional_embedding": "decoder.embed_positions.weight",
    "ln_post": "layer_norm",
}
def rename_keys(s_dict):
    """Rename every key of `s_dict` in place by applying all WHISPER_MAPPING
    substring substitutions, and return the dict.

    Renamed from the mangled ``__UpperCAmelCase`` (called as ``rename_keys``
    by the conversion entry point); the locals `key`/`new_key` were mangled.
    """
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f'{key} -> {new_key}')
        # Re-insert under the rewritten name.
        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    """Build a bias-free Linear layer sharing the embedding's weight matrix
    (used to tie the LM head to the token embeddings).

    Renamed from the mangled ``__UpperCAmelCase`` (called as
    ``make_linear_from_emb``); the Linear dimensions and ``bias=False`` were
    mangled to placeholders.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str, root: str = ".") -> bytes:
    """Download `url` into `root` (verifying the SHA256 embedded in the URL)
    and return the raw bytes; reuse a cached file when its checksum matches.

    Fixes: mangled name/locals restored (`_download` is called by the
    conversion entry point), `hashlib.shaaaa` -> `hashlib.sha256`, file handles
    now closed via context managers, `root` given a default because the call
    site passes only the URL, and the doubled "not not" in the error message
    corrected.
    """
    os.makedirs(root, exist_ok=True)

    filename = os.path.basename(url)
    # OpenAI encodes the expected SHA256 as the second-to-last URL segment.
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        with open(download_target, "rb") as f:
            model_bytes = f.read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))

    with open(download_target, "rb") as f:
        model_bytes = f.read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )

    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    """Convert an OpenAI Whisper checkpoint (size name or local .pt path) into a
    Hugging Face WhisperForConditionalGeneration and save it.

    All locals were mangled to a single placeholder; names are reconstructed
    from the visible data flow (dims/state_dict keys, config kwargs).
    """
    if ".pt" not in checkpoint_path:
        # A size name like "tiny" — fetch the official checkpoint.
        original_checkpoint = _download(_MODELS[checkpoint_path])
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    # Kept before renaming: used for the (untied) projection head below.
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        # NOTE(review): source maps decoder_attention_heads to "n_text_state";
        # "n_text_head" looks more plausible — confirm against the pristine file.
        decoder_attention_heads=dimensions["n_text_state"],
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Fixed mangled bindings: the parser and parsed args were assigned to `A_`
    # while the code below uses `parser` / `args`; also fixed the "Patht" typo.
    parser = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to the downloaded checkpoints")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()

    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 479 | 0 |
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    """Iterative depth-first search; return the set of vertices reachable from
    `start`.

    Fixes: the mangled ``def a_(_A, _A)`` had two identically named parameters
    (a SyntaxError) and seeded `explored` with ``set(_A)``; the ``__main__``
    block calls ``depth_first_search``.

    >>> sorted(depth_first_search({"A": ["B"], "B": []}, "A"))
    ['A', 'B']
    """
    explored, stack = {start}, [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
# Example adjacency-list graph; the demo below calls depth_first_search(G, "A").
# Renamed from the mangled `__UpperCamelCase` so that reference resolves.
G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
| 328 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
# Modalities a tool may declare for its inputs/outputs.
# Renamed from the mangled `__UpperCamelCase`: the tester mixin below checks
# declared types against `authorized_types`.
authorized_types = ["text", "image", "audio"]
def create_inputs(input_types):
    """Build one dummy input per declared modality ("text", "image", "audio");
    a nested list of modalities recurses into a nested list of inputs.

    Renamed from the mangled ``a_`` (the tester mixin calls ``create_inputs``);
    the parameter and the `isinstance` arguments were mangled placeholders.
    """
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append('Text input')
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir('fixtures/tests_samples/COCO')) / '000000039769.png').resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            # A tool input may itself be a list of modalities.
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f'''Invalid type requested: {input_type}''')
    return inputs
def output_types(outputs):
    """Classify each produced output as "text", "image" or "audio" based on its
    runtime type (plain or agent-wrapped).

    Renamed from the mangled ``a_`` — both module helpers shared that name, so
    this definition silently overwrote ``create_inputs``; the tester mixin
    calls ``output_types``.
    """
    types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            types.append('text')
        elif isinstance(output, (Image.Image, AgentImage)):
            types.append('image')
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            types.append('audio')
        else:
            raise ValueError(f'''Invalid output: {output}''')
    return types
@is_tool_test
class __SCREAMING_SNAKE_CASE:
    """Generic checks for a tool exposed by the concrete test class as
    ``self.tool``.

    All five methods were mangled to a single name (each overwriting the
    previous) and their locals to `UpperCamelCase` placeholders; names are
    reconstructed from the visible assertions and helper calls.
    """

    def test_inputs_outputs(self):
        # The tool must declare its input/output modalities, and every declared
        # modality (possibly nested in a list) must be an authorized type.
        self.assertTrue(hasattr(self.tool, 'inputs'))
        self.assertTrue(hasattr(self.tool, 'outputs'))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        # Calling the tool with dummy inputs must yield outputs whose types
        # match the declared output modalities.
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, 'description'))
        self.assertTrue(hasattr(self.tool, 'default_checkpoint'))
        self.assertTrue(self.tool.description.startswith('This is a tool that'))

    def test_agent_types_outputs(self):
        # Each raw output must be an instance of the agent type mapped from its
        # declared modality.
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_type_from_inputs(self):
        # Wrapping every dummy input in its agent type must still be accepted.
        inputs = create_inputs(self.tool.inputs)
        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
| 328 | 1 |
'''simple docstring'''
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
    """Tests for GradientAccumulator, standalone and under a two-replica
    MirroredStrategy on CPU.

    NOTE(review): identifiers are machine-mangled — the helper's three
    parameters share one name (a SyntaxError), several locals are bound to
    `_lowerCamelCase` while the code reads the real names, and assertion
    arguments were collapsed to `_lowercase`. Code kept byte-identical;
    reconcile with the pristine source before running.
    """

    def _snake_case ( self : Any , snake_case__ : Any , snake_case__ : int , snake_case__ : Any ) -> int:
        # Element-wise approximate equality of two equal-length lists
        # (pristine name presumably assertListAlmostEqual(list1, list2, tol)).
        self.assertEqual(len(_lowercase ) , len(_lowercase ) )
        for a, b in zip(_lowercase , _lowercase ):
            self.assertAlmostEqual(_lowercase , _lowercase , delta=_lowercase )

    def _snake_case ( self : List[str] ) -> List[str]:
        # Accumulate three one-tensor gradient lists: step must reach 3 and the
        # stored gradient must be the element-wise sum [-2.0, 5.0]; a call with
        # a mismatched number of gradients must raise; reset() zeroes state.
        _lowerCamelCase = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0] )] )
        accumulator([tf.constant([-2.0, 1.0] )] )
        accumulator([tf.constant([-1.0, 2.0] )] )
        with self.assertRaises(_lowercase ):
            accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
        self.assertEqual(accumulator.step , 3 )
        self.assertEqual(len(accumulator.gradients ) , 1 )
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 )
        accumulator.reset()
        self.assertEqual(accumulator.step , 0 )
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 )

    def _snake_case ( self : int ) -> int:
        # Reset eager context, then split one CPU into two logical devices so a
        # MirroredStrategy can run per-replica accumulation/apply steps.
        _lowerCamelCase = None
        ops.enable_eager_execution_internal()
        _lowerCamelCase = tf.config.list_physical_devices('CPU' )
        if len(_lowercase ) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
        _lowerCamelCase = tf.config.list_logical_devices(device_type='CPU' )
        _lowerCamelCase = tf.distribute.MirroredStrategy(devices=devices[:2] )
        with strategy.scope():
            _lowerCamelCase = GradientAccumulator()
            _lowerCamelCase = tf.Variable([4.0, 3.0] )
            _lowerCamelCase , _lowerCamelCase = create_optimizer(5e-5 , 1_0 , 5 )
            # Per-replica placeholder the accumulate step writes gradients into.
            _lowerCamelCase = tf.Variable([0.0, 0.0] , trainable=_lowercase )

        def accumulate_on_replica(snake_case__ : Optional[Any] ):
            accumulator([gradient] )

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )

        @tf.function
        def accumulate(snake_case__ : str , snake_case__ : int ):
            # Assign one gradient per replica, then accumulate on each replica.
            with strategy.scope():
                _lowerCamelCase = strategy.experimental_local_results(_lowercase )
                local_variables[0].assign(_lowercase )
                local_variables[1].assign(_lowercase )
                strategy.run(_lowercase , args=(gradient_placeholder,) )

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(_lowercase )

        def _check_local_values(snake_case__ : List[str] , snake_case__ : int ):
            # Each replica must hold its own accumulated gradient.
            _lowerCamelCase = strategy.experimental_local_results(accumulator._gradients[0] )
            self.assertListAlmostEqual(values[0].value() , _lowercase , tol=1e-2 )
            self.assertListAlmostEqual(values[1].value() , _lowercase , tol=1e-2 )

        accumulate([1.0, 2.0] , [-1.0, 1.0] )
        accumulate([3.0, -1.0] , [-1.0, -1.0] )
        accumulate([-2.0, 2.0] , [3.0, -2.0] )
        self.assertEqual(accumulator.step , 3 )
        _check_local_values([2.0, 3.0] , [1.0, -2.0] )
        apply_grad()
        # Applying the accumulated gradients should leave the variable ~unchanged
        # here (gradient sums cancel against the optimizer step at this tolerance).
        self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 )
        accumulator.reset()
        self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] ) | 703 | import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
# Both module constants were mangled to `A`, so the logger assignment
# overwrote the archive map and the class body's `logger` was undefined.
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/maskformer-swin-base-ade': (
        'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    """Configuration for MaskFormer: a backbone config (default Swin), a
    transformer decoder config (default DETR), and the matcher/loss weights.

    Fixes: the class was mangled to ``lowerCAmelCase__`` (colliding with another
    mangled class in this file) with an undefined base ``SCREAMING_SNAKE_CASE_``
    — ``PretrainedConfig`` is the imported base; every ``__init__`` parameter
    shared one mangled name (a SyntaxError) and is reconstructed from the
    attribute assignments and defaults visible in the body.
    """

    model_type = 'maskformer'
    attribute_map = {'hidden_size': 'mask_feature_size'}
    backbones_supported = ['resnet', 'swin']
    decoders_supported = ['detr']

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=['stage1', 'stage2', 'stage3', 'stage4'],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop('model_type')
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop('model_type') if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        """Alternate constructor from already-instantiated sub-configs."""
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output['backbone_config'] = self.backbone_config.to_dict()
        output['decoder_config'] = self.decoder_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
"""simple docstring"""
import socket
def main() -> None:
    """Connect to a file server on the local host and save its stream to
    'Received_file'.

    Fixes: the function was mangled to ``lowerCamelCase__`` while the
    ``__main__`` guard calls ``main()``, and ``out_file.write`` received the
    undefined placeholder ``_lowerCamelCase`` instead of the received chunk.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b'Hello server!')

    with open('Received_file', 'wb') as out_file:
        print('File opened')
        print('Receiving data...')
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print('Successfully received the file')
    sock.close()
    print('Connection closed')


if __name__ == "__main__":
    main()
| 549 |
"""simple docstring"""
def is_unique(input_str: str) -> bool:
    """Return True iff no character occurs twice in `input_str`, using an
    integer bitmap indexed by Unicode code point.

    Fixes: the mangled body bound every local to one placeholder, leaving
    `bitmap`, `ch_unicode` and `ch_bit_index_on` undefined; the mangled
    function name also collided with another definition in this file.
    """
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import argparse
import os
import re
__A : List[Any] = "src/diffusers"
# Pattern that looks at the indentation in a line.
__A : Dict = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
__A : Optional[int] = re.compile(r"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
__A : Union[str, Any] = re.compile(r"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
__A : List[Any] = re.compile(r"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
__A : List[str] = re.compile(r"\[([^\]]+)\]")
def get_indent(line):
    """Return the leading-whitespace prefix of `line` ("" for blank lines).

    Renamed from the mangled ``UpperCAmelCase`` — every helper in this script
    shared that name; the other functions call ``get_indent``.
    """
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into text blocks delimited by lines at exactly
    `indent_level` indentation, optionally only between the lines starting
    with `start_prompt` and `end_prompt`.

    Mangled name/parameters/locals restored from the visible data flow
    (callers use keyword arguments ``start_prompt``/``end_prompt``/
    ``indent_level``).
    """
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        # Skip ahead to the start prompt; everything before it is one block.
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                # The current line closes a deeper-indented block: include it.
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
def ignore_underscore(key):
    """Wrap a sort-key function so comparisons ignore case and underscores.

    Mangled name/parameter restored: ``sort_objects`` calls
    ``ignore_underscore(key)``.
    """
    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    """Sort `objects` isort-style: constants (ALL_CAPS) first, classes
    (Capitalized) second, functions (lowercase) last, each group sorted
    case-insensitively ignoring underscores.

    Mangled name/parameters/locals restored; `key` extracts the name to sort
    by (identity when None).
    """
    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement):
    """Return `import_statement` with the objects inside its bracketed list
    sorted isort-style, handling one-line, three-line and one-name-per-line
    layouts.

    Mangled name/locals restored from the visible structure.
    """
    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        return _re_bracket_content.sub(_replace, import_statement)
def sort_imports(file, check_only=True):
    """Sort the `_import_structure` entries of an `__init__.py`.

    When `check_only` is True, return True if the file would change (nothing is
    written); otherwise rewrite the file in place. Mangled name/locals restored
    from the visible structure.
    """
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(sorted_block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every `__init__.py` under PATH_TO_DIFFUSERS and
    raise if any would change while `check_only` is True.

    Mangled name/locals restored. NOTE(review): as in the source, `failures`
    is re-assigned (not appended to) inside the loop, so only the last failing
    file is reported — confirm against the pristine script.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    # Fixed mangled bindings: parser and parsed args were assigned to `__A`
    # while the code below uses `parser` / `args`.
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
| 334 |
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 334 | 1 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
# All three module constants were mangled to `lowerCAmelCase` (each overwriting
# the previous); the conversion functions use `logger`, `MAPPING` and
# `TOP_LEVEL_KEYS` restored here.
logger = logging.get_logger(__name__)

# fairseq key fragment -> HF Wav2Vec2-Conformer key fragment.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
    "self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
    "self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
    "self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
    "self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
    "self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
    "self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
    "self_attn.rotary_emb": "encoder.embed_positions",
    "self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
    "conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
    "conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
    "conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
    "conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
    "conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
    "ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
    "ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
    "ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
    "ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
    "ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
    "ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}

# Mapped keys that live at the top level of the HF model (no "wav2vec2_conformer." prefix).
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy `value` into the sub-module/parameter of `hf_pointer` addressed by dotted `key`.

    `weight_type` selects which tensor of the resolved module to overwrite
    ("weight", "weight_g", "weight_v", "bias", "running_mean", "running_var",
    "num_batches_tracked", "inv_freq"); `None` means the pointer itself is the
    tensor. `full_name` is the original fairseq name, used only for messages.

    Raises:
        ValueError: if the target tensor's shape does not match `value.shape`.
    """
    # Walk down the attribute chain, e.g. "encoder.layers.3.self_attn" -> module.
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """Copy every weight of a fairseq wav2vec2-conformer model into `hf_model`.

    Conv feature-extractor weights are routed through `load_conv_layer`; all other
    weights are renamed via the module-level `MAPPING` table and written with
    `set_recursively`. `is_headless` is accepted for parity with the sibling
    wav2vec2 conversion scripts; this variant does not branch on it.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    # NOTE(review): assumes the HF wrapper exposes the backbone as
    # `.wav2vec2_conformer` — confirm against the model class attribute name.
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # Top-level keys (lm_head, quantizer, ...) are not nested under the backbone.
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # Recover the layer index from the fairseq name, e.g. "...layers.3...".
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq conv-feature-extractor tensor into the HF feature extractor.

    fairseq names look like "...conv_layers.<layer_id>.<type_id>.(weight|bias)":
    type_id 0 is the conv itself, type_id 2 is the layer norm (only present when
    not using group norm, or on layer 0 with group norm). Anything else is
    recorded in `unused_weights`.

    Raises:
        ValueError: on a shape mismatch between `value` and the target tensor.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Convert a fairseq wav2vec2-conformer checkpoint to the HF format.

    Writes the converted model (and, for fine-tuned CTC checkpoints, a
    tokenizer/processor built from `dict_path`) into `pytorch_dump_folder_path`.
    """
    if config_path is not None:
        config = WavaVecaConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = WavaVecaConformerConfig()

    if "rope" in checkpoint_path:
        # NOTE(review): assumes the config attribute is `position_embeddings_type` — confirm.
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wavavec = WavaVecaConformerForCTC(config)
    else:
        hf_wavavec = WavaVecaConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_args = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_args)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wavavec, not is_finetuned)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_wavaveca_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
"""simple docstring"""
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count all simple paths from (row, col) to the bottom-right cell of `grid`.

    Cells containing 1 are walls; movement is in the four cardinal directions and
    a path may not revisit a cell. `visit` carries the cells already on the
    current path (pass an empty set at the top-level call).
    """
    row_length, col_length = len(grid), len(grid[0])
    # Out of bounds, already on the current path, or a wall: no path this way.
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    # Backtrack so sibling branches may pass through this cell.
    visit.remove((row, col))
    return count
if __name__ == "__main__":
    import doctest

    doctest.testmod()
def SCREAMING_SNAKE_CASE(separator: str, separated: list) -> str:
    """Join the strings in `separated` with `separator`, like str.join.

    Raises:
        Exception: if any element of `separated` is not a string.
    """
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    # Drop the trailing (and any leading) separator characters.
    return joined.strip(separator)
if __name__ == "__main__":
    from doctest import testmod

    testmod()
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a root of `function` in [a, b] by repeated interval bisection.

    Converges to within 1e-7 of a root. Requires function(a) and function(b)
    to have opposite signs (or one of them to be exactly zero).

    Raises:
        ValueError: if function(a) and function(b) have the same (nonzero) sign.
    """
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                # The sign change lies in [start, mid]; shrink from the right.
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid
def f(x: float) -> float:
    """Sample polynomial f(x) = x^3 - 2x - 5 used by the demo below."""
    return x**3 - 2 * x - 5
if __name__ == "__main__":
    # Approximate the real root of x^3 - 2x - 5 (~2.0945515) on [1, 1000].
    print(bisection(f, 1, 10_00))

    import doctest

    doctest.testmod()
"""simple docstring"""
def sylvester(number: int) -> int:
    """Return the `number`-th term of Sylvester's sequence (2, 3, 7, 43, 1807, ...).

    Each term is the product of all previous terms plus one, computed here via
    the recurrence s(n) = (s(n-1) - 1) * s(n-1) + 1.

    Raises:
        ValueError: if `number` is less than 1.
    """
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
"""simple docstring"""
import cmath
import math
def a__(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    """Return the complex apparent power from RMS magnitudes and phase angles (degrees)."""
    # Angles arrive in degrees; cmath.rect expects radians.
    voltage_angle_rad = math.radians(voltage_angle)
    current_angle_rad = math.radians(current_angle)

    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle_rad)
    current_rect = cmath.rect(current, current_angle_rad)

    # Calculate apparent power
    return voltage_rect * current_rect
if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger under its conventional name so helpers can call logger.info(...).
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    """Build a MaskFormerConfig (Swin-tiny backbone) for the given checkpoint name.

    `model_name` selects the dataset-specific number of labels and id2label file.
    """
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    # NOTE(review): idalabel is loaded but never attached to `config` — mirrors the
    # original control flow; confirm whether config.id2label should be set here.
    idalabel = {int(k): v for k, v in idalabel.items()}

    return config
def create_rename_keys(config):
    """Return (old_name, new_name) pairs mapping original MaskFormer weights to HF names.

    Only `config.backbone_config.depths` and `config.decoder_config.decoder_layers`
    are read; everything else is a fixed naming scheme.
    """
    rename_keys = []
    # stem
    # fmt: off
    rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias"))
    rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight"))
    rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            src = f"backbone.layers.{i}.blocks.{j}"
            dest = f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}"
            rename_keys.append((f"{src}.norm1.weight", f"{dest}.layernorm_before.weight"))
            rename_keys.append((f"{src}.norm1.bias", f"{dest}.layernorm_before.bias"))
            rename_keys.append((f"{src}.attn.relative_position_bias_table", f"{dest}.attention.self.relative_position_bias_table"))
            rename_keys.append((f"{src}.attn.relative_position_index", f"{dest}.attention.self.relative_position_index"))
            rename_keys.append((f"{src}.attn.proj.weight", f"{dest}.attention.output.dense.weight"))
            rename_keys.append((f"{src}.attn.proj.bias", f"{dest}.attention.output.dense.bias"))
            rename_keys.append((f"{src}.norm2.weight", f"{dest}.layernorm_after.weight"))
            rename_keys.append((f"{src}.norm2.bias", f"{dest}.layernorm_after.bias"))
            rename_keys.append((f"{src}.mlp.fc1.weight", f"{dest}.intermediate.dense.weight"))
            rename_keys.append((f"{src}.mlp.fc1.bias", f"{dest}.intermediate.dense.bias"))
            rename_keys.append((f"{src}.mlp.fc2.weight", f"{dest}.output.dense.weight"))
            rename_keys.append((f"{src}.mlp.fc2.bias", f"{dest}.output.dense.bias"))

        if i < 3:
            rename_keys.append((f"backbone.layers.{i}.downsample.reduction.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight"))
            rename_keys.append((f"backbone.layers.{i}.downsample.norm.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight"))
            rename_keys.append((f"backbone.layers.{i}.downsample.norm.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias"))
        rename_keys.append((f"backbone.norm{i}.weight", f"model.pixel_level_module.encoder.hidden_states_norms.{i}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"model.pixel_level_module.encoder.hidden_states_norms.{i}.bias"))

    # FPN
    rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight"))
    rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight"))
    rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias"))
    for source_index, target_index in zip(range(3, 0, -1), range(0, 3)):
        rename_keys.append((f"sem_seg_head.adapter_{source_index}.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight"))
        rename_keys.append((f"sem_seg_head.adapter_{source_index}.norm.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight"))
        rename_keys.append((f"sem_seg_head.adapter_{source_index}.norm.bias", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias"))
        rename_keys.append((f"sem_seg_head.layer_{source_index}.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight"))
        rename_keys.append((f"sem_seg_head.layer_{source_index}.norm.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight"))
        rename_keys.append((f"sem_seg_head.layer_{source_index}.norm.bias", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias"))
    rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight"))
    rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias"))

    # Transformer decoder
    for idx in range(config.decoder_config.decoder_layers):
        src = f"sem_seg_head.predictor.transformer.decoder.layers.{idx}"
        dest = f"model.transformer_module.decoder.layers.{idx}"
        # self-attention out projection
        rename_keys.append((f"{src}.self_attn.out_proj.weight", f"{dest}.self_attn.out_proj.weight"))
        rename_keys.append((f"{src}.self_attn.out_proj.bias", f"{dest}.self_attn.out_proj.bias"))
        # cross-attention out projection
        rename_keys.append((f"{src}.multihead_attn.out_proj.weight", f"{dest}.encoder_attn.out_proj.weight"))
        rename_keys.append((f"{src}.multihead_attn.out_proj.bias", f"{dest}.encoder_attn.out_proj.bias"))
        # MLP 1
        rename_keys.append((f"{src}.linear1.weight", f"{dest}.fc1.weight"))
        rename_keys.append((f"{src}.linear1.bias", f"{dest}.fc1.bias"))
        # MLP 2
        rename_keys.append((f"{src}.linear2.weight", f"{dest}.fc2.weight"))
        rename_keys.append((f"{src}.linear2.bias", f"{dest}.fc2.bias"))
        # layernorm 1 (self-attention layernorm)
        rename_keys.append((f"{src}.norm1.weight", f"{dest}.self_attn_layer_norm.weight"))
        rename_keys.append((f"{src}.norm1.bias", f"{dest}.self_attn_layer_norm.bias"))
        # layernorm 2 (cross-attention layernorm)
        rename_keys.append((f"{src}.norm2.weight", f"{dest}.encoder_attn_layer_norm.weight"))
        rename_keys.append((f"{src}.norm2.bias", f"{dest}.encoder_attn_layer_norm.bias"))
        # layernorm 3 (final layernorm)
        rename_keys.append((f"{src}.norm3.weight", f"{dest}.final_layer_norm.weight"))
        rename_keys.append((f"{src}.norm3.bias", f"{dest}.final_layer_norm.bias"))

    rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight"))
    rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias"))

    # heads on top
    rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight"))
    rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight"))
    rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias"))
    rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight"))
    rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias"))
    for i in range(3):
        rename_keys.append((f"sem_seg_head.predictor.mask_embed.layers.{i}.weight", f"mask_embedder.{i}.0.weight"))
        rename_keys.append((f"sem_seg_head.predictor.mask_embed.layers.{i}.bias", f"mask_embedder.{i}.0.bias"))
    # fmt: on

    return rename_keys
def rename_key(dct, old, new):
    """Move the entry under key `old` in `dct` to key `new` (in place)."""
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    """Split the fused Swin `attn.qkv` projections into separate HF query/key/value entries.

    Mutates `state_dict` in place: pops each fused weight/bias and writes the
    three slices under the HF MaskFormer-Swin names.
    """
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            # NOTE(review): target module path reconstructed as `attention.self.*`
            # (matches the relative_position_bias_table rename) — confirm.
            prefix = f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self"
            state_dict[f"{prefix}.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"{prefix}.query.bias"] = in_proj_bias[:dim]
            state_dict[f"{prefix}.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"{prefix}.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"{prefix}.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"{prefix}.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    """Split fused decoder `in_proj` matrices into separate q/k/v projections (in place).

    Handles both the self-attention and the cross-attention (`multihead_attn` ->
    `encoder_attn`) of every decoder layer.
    """
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # fmt: off
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        prefix = f"model.transformer_module.decoder.layers.{idx}.self_attn"
        state_dict[f"{prefix}.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"{prefix}.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"{prefix}.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"{prefix}.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"{prefix}.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"{prefix}.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        prefix = f"model.transformer_module.decoder.layers.{idx}.encoder_attn"
        state_dict[f"{prefix}.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"{prefix}.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"{prefix}.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"{prefix}.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"{prefix}.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"{prefix}.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # fmt: on
def prepare_img():
    """Download the standard COCO cats image used to sanity-check converted models."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(
    model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
):
    """Convert an original (pickled) MaskFormer checkpoint to the HF format.

    Renames/splits all weights, loads them into MaskFormerForInstanceSegmentation,
    verifies logits on a reference image for the known checkpoint, then optionally
    saves and/or pushes model + image processor.
    """
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    # The encoder's final layernorm is not part of the original checkpoint.
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65_535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")
    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="maskformer-swin-tiny-ade",
        type=str,
        help="Name of the MaskFormer model you'd like to convert",
    )
    parser.add_argument(
        "--checkpoint_path",
        default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
        type=str,
        help="Path to the original state dict (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_maskformer_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Map of canonical checkpoint names to their hosted config.json files.
VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig(PretrainedConfig):
    """Configuration for a ViT model.

    Stores the transformer hyper-parameters (sizes, dropout, activation) and the
    vision-specific ones (image/patch size, channels, encoder stride).
    """

    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class ViTOnnxConfig(OnnxConfig):
    """ONNX export configuration for ViT.

    Fix: this class previously reused the exact name of the config class above,
    silently shadowing it; it is renamed and given its real base class.
    """

    # minimum torch version required for a correct ONNX export
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Axis names for the single ``pixel_values`` input tensor."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating the exported model."""
        return 1e-4
| 681 | 0 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    """Tests for the text-to-speech tool.

    Fixes: the mixin base was an undefined mangled name (the file imports
    ``ToolTesterMixin`` for exactly this purpose); all three methods shared one
    name so only the last survived; and the loaded tool was assigned to a local
    instead of ``self.tool``, which the methods read.
    """

    def setup(self):
        self.tool = load_tool("""text-to-speech""" )
        self.tool.setup()

    def test_exact_match_arg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0 )
        result = self.tool("""hey""" )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485] ) , ) )

    def test_exact_match_kwarg(self):
        # SpeechT5 isn't deterministic
        # NOTE(review): body currently calls the tool positionally, same as the
        # arg test above — confirm whether this was meant to use text="hey".
        torch.manual_seed(0 )
        result = self.tool("""hey""" )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485] ) , ) )
| 272 |
# Universal gas constant R in J/(mol*K). The function body referenced this
# name, but the constant was previously bound to a mangled name instead.
UNIVERSAL_GAS_CONSTANT = 8.3144598


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Return the root-mean-square speed (m/s) of a gas molecule.

    Uses v_rms = sqrt(3*R*T / M).

    :param temperature: absolute temperature in kelvin (must be >= 0)
    :param molar_mass: molar mass in kg/mol (must be > 0)
    :raises Exception: if temperature is negative or molar mass is non-positive
    """
    # Fix: the function was defined under a mangled name while the example
    # below called ``rms_speed_of_molecule``, raising NameError at runtime.
    if temperature < 0:
        raise Exception("""Temperature cannot be less than 0 K""" )
    if molar_mass <= 0:
        raise Exception("""Molar mass cannot be less than or equal to 0 kg/mol""" )
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example: nitrogen gas at 300 K
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(F"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
| 272 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    """Tool that answers a natural-language question about an image using ViLT.

    Fixes: the base class name was undefined (the file imports ``PipelineTool``
    for this); the three pipeline hooks all shared one mangled name so only the
    last survived — ``PipelineTool`` dispatches to ``encode``/``forward``/
    ``decode``; and ``idalabel`` was a typo for ``id2label``.
    """

    default_checkpoint = 'dandelin/vilt-b32-finetuned-vqa'
    description = (
        'This is a tool that answers a question about an image. It takes an input named `image` which should be the '
        'image containing the information, as well as a `question` which should be the question in English. It '
        'returns a text that is the answer to the question.'
    )
    name = 'image_qa'
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ['image', 'text']
    outputs = ['text']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image, question):
        # Tokenize/featurize the (image, question) pair for the model.
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        # Highest-scoring class index mapped back to its answer string.
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
| 313 |
import numpy as np
def lowerCamelCase_(f, ya, xa, h, x_end):
    """Solve y' = f(x, y) with the classic fourth-order Runge-Kutta method.

    :param f: callable ``f(x, y)`` returning dy/dx
    :param ya: initial value y(xa)
    :param xa: initial x
    :param h: step size
    :param x_end: final x (number of steps is ``ceil((x_end - xa) / h)``)
    :return: numpy array of y values, ``y[0] == ya``

    Fix: the original signature repeated one mangled parameter name five times
    (a SyntaxError) and the update line referenced undefined ``ka`` slopes; the
    four RK4 slopes are restored as k1..k4.
    """
    n = int(np.ceil((x_end - xa) / h))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 313 | 1 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """Bezier curve defined by 2-D control points, via Bernstein basis polynomials.

    Fixes: all three methods shared one mangled name so only the last survived,
    while the bodies (and the demo below) call ``basis_function``,
    ``bezier_curve_function`` and ``BezierCurve`` — the real names are restored.
    """

    def __init__(self, list_of_points):
        # Control points of the curve as (x, y) pairs.
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t):
        """Return the value of each Bernstein basis polynomial at time t in [0, 1]."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t):
        """Return the (x, y) point on the curve at time t in [0, 1]."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size=0.01):
        """Plot the curve and its control points (requires matplotlib)."""
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x, to_plot_y, color='''blue''', label='''Curve of Degree ''' + str(self.degree))
        plt.scatter(x, y, color='''red''', label='''Control Points''')
        plt.legend()
        plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
| 114 |
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    """Builds tiny BlenderbotSmall configs/inputs for the TF model tests.

    Fixes: the ``__init__`` repeated one mangled parameter name for every
    argument (a SyntaxError); both methods shared one name; the class is
    renamed to the name ``setUp`` in the test class instantiates; ``tf.inta``
    was a typo for ``tf.int8``.
    """

    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        """Return a tiny (config, inputs_dict) pair for the common tests."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """Decoder outputs with a past cache must match a no-cache forward pass."""
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the model-input dict, filling default masks where not provided.

    Fixes: the signature repeated one mangled parameter name eight times (a
    SyntaxError) and the function name did not match its call site in the
    tester; ``tf.inta`` was a typo for ``tf.int8``.
    """
    if attention_mask is None:
        # Mask out padding positions.
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # First decoder position is always attended; mask padding afterwards.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/pipeline tests for TF BlenderbotSmall.

    Fixes: the class listed one undefined mangled base twice (the imported
    mixins are restored); the class attributes all shared one name so only the
    last survived; the three methods shared one name; and ``config_class`` was
    an undefined mangled name.
    """

    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
    """Slow integration test against the real 90M BlenderbotSmall checkpoint.

    Fixes: the two cached properties and the test all shared one mangled name,
    while the test body reads ``self.tokenizer`` and ``self.model`` — the
    properties are restored under those names.
    """

    src_text = [
        """Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like """
        """ i'm going to throw up.\nand why is that?"""
    ]
    model_name = """facebook/blenderbot_small-90M"""

    @cached_property
    def tokenizer(self):
        # NOTE(review): loads the "blenderbot-90M" tokenizer for the
        # "blenderbot_small-90M" model — presumably intentional (shared vocab);
        # confirm against the upstream checkpoint.
        return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''')

    @cached_property
    def model(self):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors='''tf''')
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
| 114 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_barthez import BarthezTokenizer
else:
    # Sentencepiece extras missing: expose the name so the class attribute
    # below still resolves. (Previously the fallback bound a mangled name,
    # leaving BarthezTokenizer undefined.)
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

# File names / URL maps / size limits referenced by BarthezTokenizerFast below.
# All of these were previously bound to one mangled name, each assignment
# shadowing the previous, so none of the real names existed.
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
        'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
        'moussaKam/barthez-orangesum-title': (
            'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
        ),
    },
    'tokenizer_file': {
        'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json',
        'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json',
        'moussaKam/barthez-orangesum-title': (
            'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'moussaKam/mbarthez': 1024,
    'moussaKam/barthez': 1024,
    'moussaKam/barthez-orangesum-title': 1024,
}

SPIECE_UNDERLINE = '▁'
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    """Fast BARThez tokenizer backed by the `tokenizers` library.

    Fixes: the base class was an undefined mangled name (the file imports
    ``PreTrainedTokenizerFast``); all class attributes shared one name so only
    the last survived; and ``__init__`` assigned its results to throwaway
    locals instead of ``self.vocab_file`` / ``self.can_save_slow_tokenizer``,
    which ``save_vocabulary`` reads.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # Mask token behaves like a normal word: strip the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add <s> ... </s> (and </s></s> between pairs) around the token ids."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """BARThez, like BART, uses all-zero token type ids."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the sentencepiece model into `save_directory` for the slow tokenizer."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )

        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
"""simple docstring"""
import cva
import numpy as np
class HarrisCorner:
    """Harris corner detector.

    Fixes: every local in ``detect`` was assigned to one mangled throwaway
    name, leaving ``h``/``w``/``ixx``/… undefined at use; the method name did
    not match the ``edge_detect.detect`` call below; the detector now uses the
    configured ``self.k`` instead of a hard-coded 0.04 that silently ignored
    the constructor argument (identical behavior for the default k=0.04).
    """

    def __init__(self, k, window_size):
        """k: Harris free parameter, must be 0.04 or 0.06; window_size: odd neighborhood size."""
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError('''invalid k value''' )

    def __str__(self):
        return str(self.k )

    def detect(self, img_path):
        """Return (annotated RGB image, [[x, y, r], ...]) for corners found in the grayscale image."""
        img = cva.imread(img_path, 0 )
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img, cva.COLOR_GRAY2RGB )
        dy, dx = np.gradient(img )
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        offset = self.window_size // 2
        for y in range(offset, h - offset ):
            for x in range(offset, w - offset ):
                # Windowed sums of the structure tensor entries.
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - self.k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r] )
                    color_img.itemset((y, x, 0), 0 )
                    color_img.itemset((y, x, 1), 0 )
                    color_img.itemset((y, x, 2), 255 )
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect('path_to_image')
    cva.imwrite('detect.png', color_img)
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:
    # Vision extras missing: provide a stub under the same name so the test
    # module still imports. (The stub was previously defined under a mangled
    # class/method name, so `Image.open` did not exist.)
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    """Pipeline tests for visual question answering.

    Fixes: all methods shared one mangled name so only the last survived, and
    method bodies referenced a different mangled name than their parameter
    lists (NameError). Real names are restored from the call patterns.
    """

    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("""visual-question-answering""", model="""hf-internal-testing/tiny-vilt-random-vqa""")
        examples = [
            {
                """image""": Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png"""),
                """question""": """How many cats are there?""",
            },
            {
                """image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
                """question""": """How many cats are there?""",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"""score""": ANY(float), """answer""": ANY(str)}],
                [{"""score""": ANY(float), """answer""": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("""visual-question-answering""", model="""hf-internal-testing/tiny-vilt-random-vqa""")
        image = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
        question = """How many cats are there?"""

        outputs = vqa_pipeline(image=image, question="""How many cats are there?""", top_k=2)
        self.assertEqual(
            outputs, [{"""score""": ANY(float), """answer""": ANY(str)}, {"""score""": ANY(float), """answer""": ANY(str)}])

        outputs = vqa_pipeline({"""image""": image, """question""": question}, top_k=2)
        self.assertEqual(
            outputs, [{"""score""": ANY(float), """answer""": ANY(str)}, {"""score""": ANY(float), """answer""": ANY(str)}])

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("""visual-question-answering""", model="""dandelin/vilt-b32-finetuned-vqa""")
        image = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
        question = """How many cats are there?"""

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"""score""": 0.8_799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}])

        outputs = vqa_pipeline({"""image""": image, """question""": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"""score""": 0.8_799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}])

        outputs = vqa_pipeline(
            [{"""image""": image, """question""": question}, {"""image""": image, """question""": question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [[{"""score""": 0.8_799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}]] * 2, )

    @require_tf
    @unittest.skip("""Visual question answering not implemented in TF""")
    def test_small_model_tf(self):
        pass
| 477 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
# Checkpoint registry: model name -> download URL plus the sample rate / size
# the checkpoint was trained with. `download()` and `main()` look models up
# here by name; the dict was previously bound to a mangled name.
MODELS_MAP = {
    "gwf-440k": {
        "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-small-190k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-large-580k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
        "sample_rate": 48000,
        "sample_size": 131072,
    },
    "maestro-uncond-150k": {
        "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "unlocked-uncond-250k": {
        "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "honk-140k": {
        "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
}
def alpha_sigma_to_t(alpha, sigma):
    """Map an (alpha, sigma) noise pair to a timestep in [0, 1].

    Fixes: the signature repeated one mangled parameter name twice (a
    SyntaxError), ``torch.atana`` was a typo for ``torch.atan2``, and the
    function name did not match its call site below.
    """
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    """Crash noise schedule: timestep -> rescaled timestep via (alpha, sigma)."""
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
class Object:
    """Bare attribute bag used as a stand-in config (see ``Object()`` in main).

    Fix: the class previously had a mangled name and an undefined base, while
    the conversion code instantiates it as ``Object()``.
    """

    pass
class DiffusionUncond(nn.Module):
    """Wrapper matching the original audio-diffusion checkpoint layout.

    Holds the diffusion U-Net, its EMA copy, and the Sobol RNG so the saved
    ``state_dict`` keys line up when loading the original checkpoint. Fix: the
    class had a mangled name while the loader instantiates ``DiffusionUncond``.
    """

    def __init__(self, global_args):
        super().__init__()

        self.diffusion = DiffusionAttnUnetaD(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    """Download a known checkpoint via wget and return its local path.

    Fix: the function name did not match its call site in the conversion
    entry point. NOTE(review): ``model_name`` is interpolated into a shell
    command — only call with keys from MODELS_MAP, never untrusted input.
    """
    url = MODELS_MAP[model_name]["url"]
    os.system(F'''wget {url} ./''' )
    return F'''./{model_name}.ckpt'''
# Layer-index -> diffusers layer-name maps for the down/up/mid blocks and the
# outermost (depth 0) level, plus the per-layer key maps for resconv and
# attention sublayers. All six were previously bound to one mangled name,
# each assignment shadowing the previous.
DOWN_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
}
UP_NUM_TO_LAYER = {
    "8": "resnets.0",
    "9": "attentions.0",
    "10": "resnets.1",
    "11": "attentions.1",
    "12": "resnets.2",
    "13": "attentions.2",
}
MID_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
    "8": "resnets.3",
    "9": "attentions.3",
    "10": "resnets.4",
    "11": "attentions.4",
    "12": "resnets.5",
    "13": "attentions.5",
}
DEPTH_0_TO_LAYER = {
    "0": "resnets.0",
    "1": "resnets.1",
    "2": "resnets.2",
    "4": "resnets.0",
    "5": "resnets.1",
    "6": "resnets.2",
}

RES_CONV_MAP = {
    "skip": "conv_skip",
    "main.0": "conv_1",
    "main.1": "group_norm_1",
    "main.3": "conv_2",
    "main.4": "group_norm_2",
}

ATTN_MAP = {
    "norm": "group_norm",
    "qkv_proj": ["query", "key", "value"],
    "out_proj": ["proj_attn"],
}


def convert_resconv_naming(name):
    """Map a ResConvBlock sub-key ("skip" / "main.N...") to its diffusers name."""
    if name.startswith("skip" ):
        return name.replace("skip" , RES_CONV_MAP["skip"] )

    # name has to be of format main.{digit}
    if not name.startswith("main." ):
        raise ValueError(F'''ResConvBlock error with {name}''' )
    return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )


def convert_attn_naming(name):
    """Map an attention sub-key to its diffusers name.

    Returns a string for 1:1 mappings, or a list of names when one original
    key (e.g. the fused qkv projection) maps to several diffusers keys.
    Fix: the loop previously compared/replaced one mangled name with itself
    instead of using each (key, value) pair of ATTN_MAP.
    """
    for key, value in ATTN_MAP.items():
        if name.startswith(key ) and not isinstance(value , list ):
            return name.replace(key , value )
        elif name.startswith(key ):
            return [name.replace(key , v ) for v in value]
    raise ValueError(F'''Attn error with {name}''' )


def rename(input_string, max_depth=13):
    """Translate one original state-dict key into the diffusers key(s).

    Walks the ``net.3.`` / ``main.7.`` nesting to determine the U-Net depth,
    picks the down/up/mid block for that depth, then converts the remaining
    resconv or attention sub-key. Returns a string, or a list of strings when
    the key expands (fused qkv). Fix: every local was previously assigned to
    one mangled throwaway name, so the real locals were undefined at use.
    """
    string = input_string
    if string.split("." )[0] == "timestep_embed":
        return string.replace("timestep_embed" , "time_proj" )

    depth = 0
    if string.startswith("net.3." ):
        depth += 1
        string = string[6:]
    elif string.startswith("net." ):
        string = string[4:]

    while string.startswith("main.7." ):
        depth += 1
        string = string[7:]

    if string.startswith("main." ):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num ) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = F'''down_blocks.{depth}'''
    elif depth > 0 and int(layer_num ) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = F'''up_blocks.{max_depth - depth - 1}'''
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = F'''up_blocks.{max_depth - 1}''' if int(layer_num ) > 3 else "down_blocks.0"

    if not string_left.startswith("." ):
        raise ValueError(F'''Naming error with {input_string} and string_left: {string_left}.''' )

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left )
    elif "attentions" in new_layer:
        string_left = convert_attn_naming(string_left )
    new_string_left = string_left

    if not isinstance(new_string_left , list ):
        new_string = prefix + "." + new_layer + "." + new_string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in new_string_left]
    return new_string
def rename_orig_weights(state_dict):
    """Build a diffusers-style state dict from the original checkpoint dict.

    Skips non-trainable sampler "kernel" entries, renames each remaining key
    via ``rename()``, and splits fused conv-attention weights when a key
    expands to several names. Fix: results were previously assigned to one
    mangled throwaway local, so the accumulated/returned ``new_state_dict``
    and the per-key ``new_k`` were undefined at use.
    """
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel" ):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k )

        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k , list ):
            new_state_dict = transform_conv_attns(new_state_dict , new_k , v )
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict, new_k, v):
    """Store conv-attention weight ``v`` under the key(s) in ``new_k``.

    A single key stores ``v`` as-is (dropping the trailing conv dim for 3-D
    weights); three keys split a fused qkv matrix into equal thirds. Fix: the
    signature repeated one mangled parameter name (a SyntaxError) and the dict
    writes were assigned to a throwaway local instead of ``new_state_dict``.
    """
    if len(new_k ) == 1:
        if len(v.shape ) == 3:
            # weight: drop the size-1 conv kernel dimension -> Linear weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices: split the fused projection into three equal parts
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3 ):
            if len(v.shape ) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def __lowerCamelCase ( _lowerCAmelCase ) -> None:
    """Convert a dance-diffusion checkpoint into a diffusers ``DanceDiffusionPipeline``.

    ``_lowerCAmelCase`` is the parsed CLI namespace (``model_path``, ``save``,
    ``checkpoint_path``). Loads the original model, renames its weights into the
    diffusers layout, then sanity-checks pipeline output against the original
    IPLMS sampler before optionally saving the converted pipeline.
    """
    # FIX: previously every local was assigned to `_UpperCAmelCase` while later
    # statements referenced the real names (device, model_name, config, ...),
    # so the function raised NameError immediately. Locals restored.
    args = _lowerCAmelCase
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
    model_name = args.model_path.split("/" )[-1].split("." )[0]
    if not os.path.isfile(args.model_path ):
        assert (
            model_name == args.model_path
        ), f'''Make sure to provide one of the official model names {MODELS_MAP.keys()}'''
        args.model_path = download(model_name )
    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]
    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0
    diffusers_model = UNetaDModel(sample_size=sample_size , sample_rate=sample_rate )
    diffusers_state_dict = diffusers_model.state_dict()
    orig_model = DiffusionUncond(config )
    orig_model.load_state_dict(torch.load(args.model_path , map_location=device )["state_dict"] )
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict )
    # The two state dicts must match except for untrainable "kernel" buffers.
    renamed_minus_diffusers = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
    diffusers_minus_renamed = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
    assert len(renamed_minus_diffusers ) == 0, f'''Problem with {renamed_minus_diffusers}'''
    assert all(k.endswith("kernel" ) for k in list(diffusers_minus_renamed ) ), f'''Problem with {diffusers_minus_renamed}'''
    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'''
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value
    diffusers_model.load_state_dict(diffusers_state_dict )
    steps = 100
    seed = 33
    scheduler = IPNDMScheduler(num_train_timesteps=steps )
    generator = torch.manual_seed(seed )
    noise = torch.randn([1, 2, config.sample_size] , generator=generator ).to(device )
    t = torch.linspace(1 , 0 , steps + 1 , device=device )[:-1]
    step_list = get_crash_schedule(t )
    pipe = DanceDiffusionPipeline(unet=diffusers_model , scheduler=scheduler )
    # re-seed so the pipeline sees the same generator state as `noise` above
    generator = torch.manual_seed(33 )
    audio = pipe(num_inference_steps=steps , generator=generator ).audios
    generated = sampling.iplms_sample(orig_model , noise , step_list , {} )
    generated = generated.clamp(-1 , 1 )
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path )
    print("Diff sum" , diff_sum )
    print("Diff max" , diff_max )
    assert diff_max < 1E-3, f'''Diff max: {diff_max} is too much :-/'''
    print(f'''Conversion for {model_name} successful!''' )
if __name__ == "__main__":
    # FIX: the parser/args were bound to throwaway names while the following
    # lines referenced `parser` and `args`, and `main` was never defined — the
    # conversion entry point above is `__lowerCamelCase`.
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    # NOTE(review): `type=bool` is an argparse pitfall — any non-empty string
    # (including "False") parses as True. Kept for CLI compatibility; consider
    # `action="store_true"` instead.
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()
    __lowerCamelCase(args)
| 684 | 0 |
'''simple docstring'''
import math
def lowerCAmelCase( initial_intensity: float , angle: float ) -> float:
    """Return the transmitted intensity per Malus's law: I = I0 * cos^2(theta).

    ``initial_intensity`` is the incident intensity (must be non-negative);
    ``angle`` is the polarizer angle in degrees (must lie in [0, 360]).
    Raises ValueError for out-of-range inputs.
    """
    # FIX: both parameters were previously named `a__`, a SyntaxError
    # (duplicate argument), and the body referenced the undefined names
    # `initial_intensity` and `angle`. Parameter names restored.
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative" )
    # handling of negative values of initial intensity
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees" )
    # handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name="malus_law")
| 716 |
'''simple docstring'''
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
# WGS-84 ellipsoid parameters, referenced by the distance function below.
# FIX: all three constants were bound to the same throwaway name
# `lowerCAmelCase_`, while the function body uses AXIS_A / AXIS_B /
# EQUATORIAL_RADIUS — previously a NameError at call time.
AXIS_A = 6378137.0  # semi-major (equatorial) axis, metres
AXIS_B = 6356752.314245  # semi-minor (polar) axis, metres
EQUATORIAL_RADIUS = 6378137  # radius used to normalise the haversine arc
def lowerCAmelCase( lat1: float , lon1: float , lat2: float , lon2: float ) -> float:
    """Approximate the geodesic distance (metres) between two points on the
    WGS-84 ellipsoid using Lambert's formula for long lines.

    Coordinates are in degrees. The central angle sigma is derived from the
    haversine distance normalised by the equatorial radius.
    """
    # FIX: the signature repeated `a__` four times (SyntaxError) and every
    # intermediate was bound to `lowerCamelCase__` while later lines referenced
    # the real names (flattening, sigma, ...). In particular the two parametric
    # latitudes collapsed into one variable, which made Q identically zero.
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lata = atan((1 - flattening) * tan(radians(lat1 ) ) )
    b_latb = atan((1 - flattening) * tan(radians(lat2 ) ) )
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1 , lon1 , lat2 , lon2 ) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lata + b_latb) / 2
    q_value = (b_latb - b_lata) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value ) ** 2) * (cos(q_value ) ** 2)
    x_demonimator = cos(sigma / 2 ) ** 2
    x_value = (sigma - sin(sigma )) * (x_numerator / x_demonimator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value ) ** 2) * (sin(q_value ) ** 2)
    y_denominator = sin(sigma / 2 ) ** 2
    y_value = (sigma + sin(sigma )) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 426 | 0 |
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
__a : Any = BertTokenizer
__a : Tuple = BertTokenizerFast
__a : Union[str, Any] = True
__a : int = True
__a : Union[str, Any] = filter_non_english
def snake_case ( self ):
super().setUp()
SCREAMING_SNAKE_CASE_ : Any = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'UNwant\u00E9d,running'
SCREAMING_SNAKE_CASE_ : List[str] = 'unwanted, running'
return input_text, output_text
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(snake_case__ ,['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) ,[9, 6, 7, 12, 10, 11] )
def snake_case ( self ):
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : int = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ : str = 'UNwant\u00E9d,running'
SCREAMING_SNAKE_CASE_ : str = tokenizer.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = rust_tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer.encode(snake_case__ ,add_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE_ : int = rust_tokenizer.encode(snake_case__ ,add_special_tokens=snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer.encode(snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = rust_tokenizer.encode(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# With lower casing
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_tokenizer(do_lower_case=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_rust_tokenizer(do_lower_case=snake_case__ )
SCREAMING_SNAKE_CASE_ : int = 'UNwant\u00E9d,running'
SCREAMING_SNAKE_CASE_ : Any = tokenizer.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = rust_tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : str = tokenizer.encode(snake_case__ ,add_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE_ : str = rust_tokenizer.encode(snake_case__ ,add_special_tokens=snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer.encode(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = rust_tokenizer.encode(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) ,['ah', '\u535A', '\u63A8', 'zz'] )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = BasicTokenizer(do_lower_case=snake_case__ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) ,['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['hello'] )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = BasicTokenizer(do_lower_case=snake_case__ ,strip_accents=snake_case__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['h\u00E9llo'] )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = BasicTokenizer(do_lower_case=snake_case__ ,strip_accents=snake_case__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['hello'] )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = BasicTokenizer(do_lower_case=snake_case__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['hello'] )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = BasicTokenizer(do_lower_case=snake_case__ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) ,['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = BasicTokenizer(do_lower_case=snake_case__ ,strip_accents=snake_case__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = BasicTokenizer(do_lower_case=snake_case__ ,strip_accents=snake_case__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = BasicTokenizer(do_lower_case=snake_case__ ,never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) ,['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = BasicTokenizer()
SCREAMING_SNAKE_CASE_ : Any = 'a\n\'ll !!to?\'d of, can\'t.'
SCREAMING_SNAKE_CASE_ : Tuple = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.']
self.assertListEqual(tokenizer.tokenize(snake_case__ ) ,snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
SCREAMING_SNAKE_CASE_ : List[str] = {}
for i, token in enumerate(snake_case__ ):
SCREAMING_SNAKE_CASE_ : Tuple = i
SCREAMING_SNAKE_CASE_ : List[str] = WordpieceTokenizer(vocab=snake_case__ ,unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) ,[] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) ,['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) ,['[UNK]', 'runn', '##ing'] )
def snake_case ( self ):
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def snake_case ( self ):
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def snake_case ( self ):
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Dict = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(snake_case__ ) for t in ['Test', '\xad', 'test']] ,[['[UNK]'], [], ['[UNK]']] )
self.assertListEqual(
[rust_tokenizer.tokenize(snake_case__ ) for t in ['Test', '\xad', 'test']] ,[['[UNK]'], [], ['[UNK]']] )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.tokenizer_class.from_pretrained('bert-base-uncased' )
SCREAMING_SNAKE_CASE_ : str = tokenizer.encode('sequence builders' ,add_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer.encode('multi-sequence build' ,add_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE_ : int = tokenizer.build_inputs_with_special_tokens(snake_case__ )
SCREAMING_SNAKE_CASE_ : str = tokenizer.build_inputs_with_special_tokens(snake_case__ ,snake_case__ )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def snake_case ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : int = self.rust_tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = F'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer_r.encode_plus(
snake_case__ ,return_attention_mask=snake_case__ ,return_token_type_ids=snake_case__ ,return_offsets_mapping=snake_case__ ,add_special_tokens=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Any = tokenizer_r.do_lower_case if hasattr(snake_case__ ,'do_lower_case' ) else False
SCREAMING_SNAKE_CASE_ : Any = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] ,tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] ,tokens['offset_mapping'] )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = ['的', '人', '有']
SCREAMING_SNAKE_CASE_ : List[Any] = ''.join(snake_case__ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
SCREAMING_SNAKE_CASE_ : Dict = self.tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : str = self.rust_tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_p.encode(snake_case__ ,add_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer_r.encode(snake_case__ ,add_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer_r.convert_ids_to_tokens(snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer_p.convert_ids_to_tokens(snake_case__ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(snake_case__ ,snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = False
SCREAMING_SNAKE_CASE_ : Dict = self.rust_tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = self.tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_r.encode(snake_case__ ,add_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer_p.encode(snake_case__ ,add_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE_ : int = tokenizer_r.convert_ids_to_tokens(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer_p.convert_ids_to_tokens(snake_case__ )
# it is expected that only the first Chinese character is not preceded by "##".
SCREAMING_SNAKE_CASE_ : List[Any] = [
F'##{token}' if idx != 0 else token for idx, token in enumerate(snake_case__ )
]
self.assertListEqual(snake_case__ ,snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
| 105 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class UpperCamelCase__( datasets.BeamBasedBuilder ):
    """Minimal Beam-based builder producing flat string examples for tests."""

    # FIX: all three methods were previously collapsed to the same name `a__`
    # (so only the last survived) and `supervised_keys` referenced an undefined
    # name. The `datasets.BeamBasedBuilder` API method names are restored.
    def _info(self ):
        return datasets.DatasetInfo(
            features=datasets.Features({'''content''': datasets.Value('''string''' )} ) , supervised_keys=None , )

    def _split_generators(self , dl_manager , pipeline ):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_dummy_examples()} )]

    def _build_pcollection(self , pipeline , examples ):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples )
class UpperCamelCase__( datasets.BeamBasedBuilder ):
    """Minimal Beam-based builder producing nested examples for tests."""

    # FIX: methods were collapsed to one name (`a__`) and `supervised_keys`
    # referenced an undefined name; the builder API method names are restored.
    def _info(self ):
        return datasets.DatasetInfo(
            features=datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) , supervised_keys=None , )

    def _split_generators(self , dl_manager , pipeline ):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_nested_examples()} )
        ]

    def _build_pcollection(self , pipeline , examples ):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples )
def lowerCamelCase__ ( ):
    """Return the flat dummy examples: (index, {"content": text}) tuples."""
    contents = ['''foo''', '''bar''', '''foobar''']
    return [(index, {"content": text}) for index, text in enumerate(contents )]
def lowerCamelCase__ ( ):
    """Return the nested dummy examples: (index, {"a": {"b": [text]}}) tuples."""
    contents = ['''foo''', '''bar''', '''foobar''']
    return [(index, {"a": {"b": [text]}}) for index, text in enumerate(contents )]
class UpperCamelCase__( lowerCAmelCase ):
@require_beam
def a__( self : Optional[int] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase = DummyBeamDataset(cache_dir=lowerCAmelCase , beam_runner='''DirectRunner''' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(lowerCAmelCase , builder.name , '''default''' , '''0.0.0''' , F"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
UpperCAmelCase = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , lowerCAmelCase )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , lowerCAmelCase )
self.assertDictEqual(dset['''train'''][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset['''train'''][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(lowerCAmelCase , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
@require_beam
def a__( self : Optional[Any] )-> Tuple:
"""simple docstring"""
import apache_beam as beam
UpperCAmelCase = beam.io.parquetio.WriteToParquet
UpperCAmelCase = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase = DummyBeamDataset(cache_dir=lowerCAmelCase , beam_runner='''DirectRunner''' )
with patch('''apache_beam.io.parquetio.WriteToParquet''' ) as write_parquet_mock:
UpperCAmelCase = partial(lowerCAmelCase , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
lowerCAmelCase , builder.name , '''default''' , '''0.0.0''' , F"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
lowerCAmelCase , builder.name , '''default''' , '''0.0.0''' , F"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
UpperCAmelCase = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , lowerCAmelCase )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , lowerCAmelCase )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset['''train''']['''content'''] ) , sorted(['''foo''', '''bar''', '''foobar'''] ) )
self.assertTrue(
os.path.exists(os.path.join(lowerCAmelCase , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
@require_beam
def a__( self : Union[str, Any] )-> Any:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase = DummyBeamDataset(cache_dir=lowerCAmelCase )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def a__( self : str )-> int:
"""simple docstring"""
UpperCAmelCase = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase = NestedBeamDataset(cache_dir=lowerCAmelCase , beam_runner='''DirectRunner''' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(lowerCAmelCase , builder.name , '''default''' , '''0.0.0''' , F"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) )
UpperCAmelCase = builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , lowerCAmelCase )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , lowerCAmelCase )
self.assertDictEqual(dset['''train'''][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset['''train'''][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(lowerCAmelCase , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
| 210 | 0 |
"""simple docstring"""
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# FIX: both module-level bindings were named `a` (the second overwrote the
# first) while the config class below reads `logger`. Conventional names
# restored.
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """facebook/mask2former-swin-small-coco-instance""": (
        """https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
    """Configuration class for a Mask2Former model.

    Instantiates a Swin backbone config by default and stores every
    architecture / loss hyper-parameter as an instance attribute.
    """
    # FIX: the three class attributes were all bound to one name
    # (`__lowerCamelCase`), the __init__ signature repeated `snake_case__` for
    # every parameter (a SyntaxError), every `self.x = ...` was collapsed to a
    # throwaway local, and both methods shared one name. Names restored to the
    # conventions the body itself references (`self.backbones_supported`,
    # `self.__class__.model_type`, ...).

    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config=None,
        feature_size=256,
        mask_feature_size=256,
        hidden_dim=256,
        encoder_feedforward_dim=1024,
        activation_function="relu",
        encoder_layers=6,
        decoder_layers=10,
        num_attention_heads=8,
        dropout=0.0,
        dim_feedforward=2048,
        pre_norm=False,
        enforce_input_projection=False,
        common_stride=4,
        ignore_value=255,
        num_queries=100,
        no_object_weight=0.1,
        class_weight=2.0,
        mask_weight=5.0,
        dice_weight=5.0,
        train_num_points=12544,
        oversample_ratio=3.0,
        importance_sample_ratio=0.75,
        init_std=0.02,
        init_xavier_std=1.0,
        use_auxiliary_loss=True,
        feature_strides=[4, 8, 16, 32],  # NOTE(review): mutable default, kept for API compatibility
        output_auxiliary_logits=None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone." )
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=False , out_features=["stage1", "stage2", "stage3", "stage4"] , )
        if isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.pop("model_type" )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                F'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '''
                F'''Supported model types: {','.join(self.backbones_supported )}''' )
        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers
        super().__init__(**kwargs )

    @classmethod
    def from_backbone_config(cls , backbone_config , **kwargs ):
        """Instantiate a config from a pre-trained backbone configuration."""
        return cls(
            backbone_config=backbone_config , **kwargs , )

    def to_dict(self ):
        """Serialize this instance to a plain dictionary (backbone included)."""
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 700 |
"""simple docstring"""
a : List[Any] = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def lowercase__(A ) ->bytes:
    """Base64-encode the bytes-like object ``A`` (RFC 4648) and return bytes.

    Raises TypeError if ``A`` is not bytes.
    """
    # Local copy of the Base64 alphabet so the function is self-contained
    # (the module-level constant was not bound under this name).
    B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
    # FIX: the previous version compared `isinstance(A, A)` (always True for
    # bytes), raised TypeError with the data instead of the message, iterated
    # `bin(A)` instead of `bin(byte)`, and bound every intermediate to a
    # throwaway name before referencing undefined ones.
    if not isinstance(A , bytes ):
        msg = f'''a bytes-like object is required, not \'{A.__class__.__name__}\''''
        raise TypeError(msg )
    binary_stream = "".join(bin(byte )[2:].zfill(8 ) for byte in A )
    padding_needed = len(binary_stream ) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream ) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream ) % 6)
    else:
        padding = b""
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
            for index in range(0 , len(binary_stream ) , 6 ) ).encode()
        + padding
    )
def lowercase__(A ) ->bytes:
    """Decode Base64 ``A`` (str or ASCII bytes) back to the original bytes.

    Raises TypeError for wrong input types, ValueError for non-ASCII bytes,
    and AssertionError for invalid characters or padding (original behaviour
    intentionally preserved).
    """
    # Local copy of the Base64 alphabet so the function is self-contained.
    B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
    # FIX: the previous version compared `isinstance(A, A)` twice, raised
    # TypeError with the data instead of the message, and bound every
    # intermediate to a throwaway local before referencing undefined names
    # (`encoded_data`, `padding`, `binary_stream`).
    if not isinstance(A , str ) and not isinstance(A , bytes ):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f'''not \'{A.__class__.__name__}\''''
        )
        raise TypeError(msg )
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(A , bytes ):
        try:
            A = A.decode("utf-8" )
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters" )
    padding = A.count("=" )
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in A[:-padding] ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in A ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(A ) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        A = A[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in A )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in A )
    decoded_data = [
        int(binary_stream[index : index + 8] , 2 )
        for index in range(0 , len(binary_stream ) , 8 )
    ]
    return bytes(decoded_data )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 85 | 0 |
class lowerCAmelCase__ :
    """A node of a doubly linked list, holding a payload and prev/next links."""

    # FIX: __init__ previously assigned all three arguments to a throwaway
    # local, so the getters below read attributes that were never set.
    def __init__( self , data , previous=None , next_node=None ) -> None:
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__( self ) -> str:
        return F'{self.data}'

    def get_data( self ):
        """Return the node's payload."""
        return self.data

    def get_next( self ):
        """Return the successor node (or None)."""
        return self.next

    def get_previous( self ):
        """Return the predecessor node (or None)."""
        return self.previous
class lowerCAmelCase__ :
    """Forward iterator over a chain of nodes exposing get_data()/get_next()."""

    # FIX: __init__ bound `head` to a throwaway local (self.current was never
    # set) and the advance method was not named `__next__`, so the iterator
    # protocol was broken.
    def __init__( self , head ) -> None:
        self.current = head

    def __iter__( self ):
        return self

    def __next__( self ):
        """Return the current node's data and advance; StopIteration at the end."""
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value
class lowerCAmelCase__ :
    """A doubly linked list with head/tail pointers.

    FIX: every ``self.head``/``self.tail`` assignment was previously collapsed
    to a throwaway local and all public methods shared the single name ``A_``
    (so only the last survived). Conventional method names matching each
    body's behaviour are restored.
    """

    def __init__( self ) -> None:
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__( self ) -> str:
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data() )
            current = current.get_next()
        return " ".join(str(node ) for node in nodes )

    def __contains__( self , value ) -> bool:
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__( self ):
        return LinkedListIterator(self.head )

    def get_head_data( self ):
        """Return the first element's data, or None if the list is empty."""
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data( self ):
        """Return the last element's data, or None if the list is empty."""
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head( self , node ) -> None:
        """Prepend `node`; it becomes both head and tail if the list was empty."""
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head , node )

    def set_tail( self , node ) -> None:
        """Append `node` at the tail."""
        if self.head is None:
            self.set_head(node )
        else:
            self.insert_after_node(self.tail , node )

    def insert( self , value ) -> None:
        """Wrap `value` in a Node and append it."""
        node = Node(value )
        if self.head is None:
            self.set_head(node )
        else:
            self.set_tail(node )

    def insert_before_node( self , node , node_to_insert ) -> None:
        """Splice `node_to_insert` immediately before `node`."""
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert

    def insert_after_node( self , node , node_to_insert ) -> None:
        """Splice `node_to_insert` immediately after `node`."""
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert

    def insert_at_position( self , position , value ) -> None:
        """Insert `value` at 1-based `position`; append if position is past the end."""
        current_position = 1
        new_node = Node(value )
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node , new_node )
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail , new_node )

    def get_node( self , item ):
        """Return the first node whose data equals `item`; raise if absent."""
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("""Node not found""" )

    def delete_value( self , value ) -> None:
        """Unlink the first node holding `value`, fixing head/tail as needed."""
        if (node := self.get_node(value )) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node )

    @staticmethod
    def remove_node_pointers( node ) -> None:
        """Detach `node` from its neighbours and clear its own links."""
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.previous = None
        node.next = None

    def is_empty( self ) -> bool:
        return self.head is None
def __A() -> None:
    """Run this module's doctests.

    Kept as a named entry point so tooling can invoke it explicitly; the
    module guard below triggers it on direct execution.
    """
    import doctest

    doctest.testmod()


if __name__ == "__main__":
    __A()
| 612 |
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
# Module-level logger used by the GLUE training/eval routines below.
UpperCAmelCase__ :Union[str, Any] = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
    """PyTorch-Lightning module that fine-tunes a transformer on a GLUE task.

    NOTE(review): every local assignment in the method bodies targets the same
    name (``__lowerCamelCase``) while later statements read the intended names
    (``hparams``, ``outputs``, ``loss``, ``args`` ...), so the bodies cannot run
    as written — this looks like mechanically mangled code whose locals need to
    be restored.  Code is left byte-identical; only documentation is added.
    """
    # Task mode handed to the BaseTransformer parent (selects the model head).
    snake_case__ : List[Any] = 'sequence-classification'
    def __init__( self : int , A__ : Optional[int] ):
        """Accept hparams as a dict or Namespace and initialise the base model."""
        if type(A__ ) == dict:
            __lowerCamelCase : str = Namespace(**A__ )
        __lowerCamelCase : List[str] = glue_output_modes[hparams.task]
        __lowerCamelCase : Dict = glue_tasks_num_labels[hparams.task]
        super().__init__(A__ , A__ , self.mode )
    def a_ ( self : Dict , **A__ : List[str] ):
        """Forward pass: delegate directly to the wrapped HF model."""
        return self.model(**A__ )
    def a_ ( self : List[str] , A__ : List[Any] , A__ : str ):
        """Training step: build model inputs from the batch, return loss + lr log."""
        __lowerCamelCase : Optional[Any] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            __lowerCamelCase : Tuple = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
        __lowerCamelCase : Any = self(**A__ )
        __lowerCamelCase : Optional[int] = outputs[0]
        __lowerCamelCase : Union[str, Any] = self.trainer.lr_schedulers[0]["""scheduler"""]
        __lowerCamelCase : str = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}
    def a_ ( self : str ):
        """Cache GLUE features for the train and dev splits if not already cached."""
        __lowerCamelCase : str = self.hparams
        __lowerCamelCase : Optional[Any] = processors[args.task]()
        __lowerCamelCase : List[Any] = processor.get_labels()
        for mode in ["train", "dev"]:
            __lowerCamelCase : Union[str, Any] = self._feature_file(A__ )
            if os.path.exists(A__ ) and not args.overwrite_cache:
                logger.info("""Loading features from cached file %s""" , A__ )
            else:
                logger.info("""Creating features from dataset file at %s""" , args.data_dir )
                __lowerCamelCase : List[Any] = (
                    processor.get_dev_examples(args.data_dir )
                    if mode == """dev"""
                    else processor.get_train_examples(args.data_dir )
                )
                __lowerCamelCase : Dict = convert_examples_to_features(
                    A__ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
                logger.info("""Saving features into cached file %s""" , A__ )
                torch.save(A__ , A__ )
    def a_ ( self : Optional[Any] , A__ : str , A__ : int , A__ : bool = False ):
        """Build a DataLoader over the cached features for the requested split."""
        __lowerCamelCase : int = """dev""" if mode == """test""" else mode
        __lowerCamelCase : List[str] = self._feature_file(A__ )
        logger.info("""Loading features from cached file %s""" , A__ )
        __lowerCamelCase : Any = torch.load(A__ )
        __lowerCamelCase : Optional[Any] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
        __lowerCamelCase : int = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
        __lowerCamelCase : Any = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
        if self.hparams.glue_output_mode == "classification":
            __lowerCamelCase : List[str] = torch.tensor([f.label for f in features] , dtype=torch.long )
        elif self.hparams.glue_output_mode == "regression":
            __lowerCamelCase : List[str] = torch.tensor([f.label for f in features] , dtype=torch.float )
        return DataLoader(
            TensorDataset(A__ , A__ , A__ , A__ ) , batch_size=A__ , shuffle=A__ , )
    def a_ ( self : Tuple , A__ : Optional[int] , A__ : Optional[int] ):
        """Validation step: return loss, logits and gold labels for epoch-end metrics."""
        __lowerCamelCase : Optional[Any] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            __lowerCamelCase : List[str] = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
        __lowerCamelCase : int = self(**A__ )
        __lowerCamelCase , __lowerCamelCase : List[str] = outputs[:2]
        __lowerCamelCase : str = logits.detach().cpu().numpy()
        __lowerCamelCase : List[Any] = inputs["""labels"""].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def a_ ( self : int , A__ : str ):
        """Aggregate step outputs into GLUE metrics (classification or regression)."""
        __lowerCamelCase : Optional[Any] = torch.stack([x["""val_loss"""] for x in outputs] ).mean().detach().cpu().item()
        __lowerCamelCase : Optional[int] = np.concatenate([x["""pred"""] for x in outputs] , axis=0 )
        if self.hparams.glue_output_mode == "classification":
            __lowerCamelCase : Optional[int] = np.argmax(A__ , axis=1 )
        elif self.hparams.glue_output_mode == "regression":
            __lowerCamelCase : List[str] = np.squeeze(A__ )
        __lowerCamelCase : List[str] = np.concatenate([x["""target"""] for x in outputs] , axis=0 )
        __lowerCamelCase : Optional[Any] = [[] for _ in range(out_label_ids.shape[0] )]
        __lowerCamelCase : Dict = [[] for _ in range(out_label_ids.shape[0] )]
        __lowerCamelCase : Optional[int] = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task , A__ , A__ )}
        __lowerCamelCase : int = dict(results.items() )
        __lowerCamelCase : List[Any] = results
        return ret, preds_list, out_label_list
    def a_ ( self : List[Any] , A__ : list ):
        """Validation epoch end: reduce step outputs to a single log dict."""
        __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Any = self._eval_end(A__ )
        __lowerCamelCase : Optional[Any] = ret["""log"""]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    def a_ ( self : Tuple , A__ : int ):
        """Test epoch end: same reduction as validation, keyed as test loss."""
        __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Dict = self._eval_end(A__ )
        __lowerCamelCase : str = ret["""log"""]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def a_ ( A__ : Union[str, Any] , A__ : int ):
        """Add GLUE-specific CLI arguments on top of the base transformer args."""
        BaseTransformer.add_model_specific_args(A__ , A__ )
        parser.add_argument(
            """--max_seq_length""" , default=128 , type=A__ , help=(
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            ) , )
        parser.add_argument(
            """--task""" , default="""""" , type=A__ , required=A__ , help="""The GLUE task to run""" , )
        parser.add_argument(
            """--gpus""" , default=0 , type=A__ , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
        parser.add_argument(
            """--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
        return parser
def __lowercase () -> Optional[int]:
    """Script entry point: parse CLI args, train, and optionally run test predictions.

    NOTE(review): locals are mangled to ``__lowerCamelCase`` while later lines
    read ``parser``/``args``/``model``/``trainer``/``checkpoints``, and the
    class is referenced as ``GLUETransformer`` — restore those bindings before
    running.  Code left byte-identical; only documentation added.
    """
    __lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
    add_generic_args(_lowercase, os.getcwd() )
    __lowerCamelCase : str = GLUETransformer.add_model_specific_args(_lowercase, os.getcwd() )
    __lowerCamelCase : List[Any] = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        __lowerCamelCase : int = os.path.join(
            """./results""", f"{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}", )
        os.makedirs(args.output_dir )
    __lowerCamelCase : Optional[int] = GLUETransformer(_lowercase )
    __lowerCamelCase : str = generic_train(_lowercase, _lowercase )
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        __lowerCamelCase : List[Any] = sorted(glob.glob(os.path.join(args.output_dir, """checkpoint-epoch=*.ckpt""" ), recursive=_lowercase ) )
        __lowerCamelCase : Dict = model.load_from_checkpoint(checkpoints[-1] )
        return trainer.test(_lowercase )
if __name__ == "__main__":
    # ``__lowercase`` is this script's main(): it parses args, trains, and
    # optionally predicts.  The original called an undefined name ``main``.
    __lowercase()
| 150 | 0 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def UpperCAmelCase_( a__ ):
    """Tokenize one dataset example.

    Args:
        a__: a mapping with a ``"content"`` string field (one dataset row).

    Returns:
        dict with ``input_ids`` (token ids of the content, no truncation)
        and ``ratio_char_token`` (characters per produced token, a rough
        compression measure).

    NOTE: ``tokenizer`` is the module-level tokenizer created below.
    """
    output = {}
    # truncation=False: keep the full document; ratio below needs all tokens.
    output["input_ids"] = tokenizer(a__["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(a__["content"]) / len(output["input_ids"])
    return output
# Pretokenization driver: parse args, tokenize the dataset in parallel, and
# push the result to the Hub.
# NOTE(review): every assignment targets the mangled name ``a__`` while later
# lines read ``parser``/``args``/``tokenizer``/``t_start``/``ds`` — restore
# those bindings before running.  Code left byte-identical; comments only.
a__ : List[Any] = HfArgumentParser(PretokenizationArguments)
a__ : str = parser.parse_args()
if args.num_workers is None:
    # Default to one worker per CPU core for ds.map below.
    a__ : List[str] = multiprocessing.cpu_count()
a__ : Dict = AutoTokenizer.from_pretrained(args.tokenizer_dir)
a__ : List[str] = time.time()
a__ : Dict = load_dataset(args.dataset_name, split='''train''')
print(F"Dataset loaded in {time.time()-t_start:.2f}s")
a__ : str = time.time()
# Tokenize in parallel and drop every column except the tokenizer output.
a__ : str = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        '''repo_name''',
        '''path''',
        '''copies''',
        '''size''',
        '''content''',
        '''license''',
        '''hash''',
        '''line_mean''',
        '''line_max''',
        '''alpha_frac''',
        '''autogenerated''',
    ],
)
print(F"Dataset tokenized in {time.time()-t_start:.2f}s")
a__ : str = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F"Data pushed to the hub in {time.time()-t_start:.2f}s")
| 333 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class a_ ( a__ ):
    """Unconditional image-generation pipeline using Karras et al. (2022) stochastic sampling.

    NOTE(review): locals in ``__call__`` are mangled to ``SCREAMING_SNAKE_CASE``
    while later lines read the intended names (``model``, ``shape``,
    ``sample``, ``sigma_hat`` ...) — restore those bindings before running.
    Code left byte-identical; only documentation added.
    """
    # Declared components, registered in __init__ via register_modules.
    __SCREAMING_SNAKE_CASE : UNetaDModel
    __SCREAMING_SNAKE_CASE : KarrasVeScheduler
    def __init__( self , _lowerCamelCase , _lowerCamelCase ) ->Dict:
        """Register the UNet denoiser and the Karras-VE scheduler."""
        super().__init__()
        self.register_modules(unet=_lowerCamelCase , scheduler=_lowerCamelCase )
    @torch.no_grad()
    def __call__( self , _lowerCamelCase = 1 , _lowerCamelCase = 50 , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , **_lowerCamelCase , ) ->Union[Tuple, ImagePipelineOutput]:
        """Sample a batch of images with the requested number of Karras-VE steps."""
        __SCREAMING_SNAKE_CASE : Union[str, Any] = self.unet.config.sample_size
        __SCREAMING_SNAKE_CASE : List[str] = (batch_size, 3, img_size, img_size)
        __SCREAMING_SNAKE_CASE : Optional[int] = self.unet
        # sample x_0 ~ N(0, sigma_0^2 * I)
        __SCREAMING_SNAKE_CASE : Optional[int] = randn_tensor(_lowerCamelCase , generator=_lowerCamelCase , device=self.device ) * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(_lowerCamelCase )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # here sigma_t == t_i from the paper
            __SCREAMING_SNAKE_CASE : List[Any] = self.scheduler.schedule[t]
            __SCREAMING_SNAKE_CASE : Any = self.scheduler.schedule[t - 1] if t > 0 else 0
            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = self.scheduler.add_noise_to_input(_lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase )
            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            __SCREAMING_SNAKE_CASE : List[str] = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            __SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                __SCREAMING_SNAKE_CASE : Union[str, Any] = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
                __SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler.step_correct(
                    _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , step_output.prev_sample , step_output['''derivative'''] , )
            __SCREAMING_SNAKE_CASE : Optional[int] = step_output.prev_sample
        __SCREAMING_SNAKE_CASE : Any = (sample / 2 + 0.5).clamp(0 , 1 )
        __SCREAMING_SNAKE_CASE : Tuple = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            __SCREAMING_SNAKE_CASE : Any = self.numpy_to_pil(_lowerCamelCase )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=_lowerCamelCase )
| 333 | 1 |
import math
import os
import sys
def _lowerCAmelCase( UpperCamelCase__: str ) -> str:
    """Return the contents of the file at ``UpperCamelCase__`` as a bit string.

    Each byte becomes its zero-padded 8-character binary representation.
    Prints a message and exits the process if the file cannot be read,
    matching the original CLI-tool behaviour.
    """
    result = ""
    try:
        with open(UpperCamelCase__, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            result += f"{dat:08b}"
        return result
    except OSError:
        print("File not accessible")
        sys.exit()
def _lowerCAmelCase(
    lexicon: dict, curr_string: str, index: int, last_match_id: str
) -> None:
    """Extend the LZ lexicon in place after emitting a code.

    Replaces the matched phrase ``curr_string`` with its two extensions
    ``curr_string + "0"`` and ``curr_string + "1"``.  When ``index`` crosses a
    power of two, all existing codes are left-padded with ``"0"`` because the
    code width grows by one bit.  (The original declared all four parameters
    with the same name and never wrote the new entries.)
    """
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id
    if math.log2(index).is_integer():
        # Code width grew by one bit: pad every existing code.
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]
    lexicon[curr_string + "1"] = bin(index)[2:]
def _lowerCAmelCase( UpperCamelCase__: str ) -> str:
    """Compress a bit string with the Lempel-Ziv scheme used by this module.

    Greedily matches the longest phrase already in the lexicon, emits its
    code, and grows the lexicon via ``add_key_to_lexicon``.  Any unmatched
    tail is zero-padded until it matches.  (The original never bound
    ``lexicon``/``result``/``index``, so it could not run.)
    """
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(UpperCamelCase__)):
        curr_string += UpperCamelCase__[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""
    # Flush the tail: pad with zeros until it matches a known phrase.
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result
def _lowerCAmelCase( source_path: str, compressed: str ) -> str:
    """Prefix *compressed* with the source file's length header.

    The header is the file size in binary, preceded by ``len - 1`` zero bits
    so the decompressor can recover where the length field ends.  (The
    original never bound its intermediates, so it could not run; the two
    same-named parameters were also a syntax error.)
    """
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed
def _lowerCAmelCase( file_path: str, to_write: str ) -> None:
    """Write the bit string *to_write* to *file_path* as packed bytes.

    The bit string is split into 8-bit groups; a ``1`` followed by zeros is
    appended as an end-of-data marker so trailing padding can be stripped on
    decompression.  Prints a message and exits on I/O failure, matching the
    original CLI-tool behaviour.
    """
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                # Data ends on a byte boundary: the marker needs a whole byte.
                result_byte_array.append("10000000")
            else:
                # Fill the last partial byte: marker bit then zero padding.
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def _lowerCAmelCase( source_path: str, destination_path: str ) -> None:
    """Compress the file at *source_path* into *destination_path*.

    Pipeline: read bytes as bits -> LZ-compress -> prepend length header ->
    pack bits back into bytes on disk.  (The original never bound the
    intermediate results, so nothing flowed between the stages.)
    """
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
| 641 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
# Module-level flag; unused within this excerpt — presumably toggles debug
# behaviour in the surrounding test module (TODO confirm).
_lowercase : Optional[int] = False
@skip_mps
class _UpperCamelCase ( __snake_case , __snake_case , __snake_case , unittest.TestCase ):
    """Fast pipeline tests for StableDiffusionAttendAndExcitePipeline on tiny components.

    NOTE(review): locals are mangled to ``A`` while later lines read the
    intended names (``unet``, ``scheduler``, ``pipe`` ...), and the argument
    placeholder ``a__`` is passed where booleans/objects are expected —
    mangled code, left byte-identical; only documentation added.
    """
    lowerCAmelCase = StableDiffusionAttendAndExcitePipeline
    lowerCAmelCase = False
    lowerCAmelCase = TEXT_TO_IMAGE_PARAMS
    lowerCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS.union({'token_indices'} )
    lowerCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
    lowerCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
    @classmethod
    def _UpperCAmelCase ( cls ) -> List[Any]:
        # Attend-and-excite is order-sensitive: force deterministic kernels.
        super().setUpClass()
        torch.use_deterministic_algorithms(a__ )
    @classmethod
    def _UpperCAmelCase ( cls ) -> Tuple:
        super().tearDownClass()
        torch.use_deterministic_algorithms(a__ )
    def _UpperCAmelCase ( self ) -> Dict:
        # Build a minimal set of pipeline components for fast CPU tests.
        torch.manual_seed(0 )
        A = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a__ , )
        A = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=a__ , set_alpha_to_one=a__ , )
        torch.manual_seed(0 )
        A = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        A = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
        A = CLIPTextModel(a__ )
        A = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        A = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components
    def _UpperCAmelCase ( self , a__ , a__=0 ) -> Optional[Any]:
        # Deterministic inputs for a single short inference run.
        if str(a__ ).startswith("""mps""" ):
            A = torch.manual_seed(a__ )
        else:
            A = torch.Generator(device=a__ ).manual_seed(a__ )
        A = A = {
            """prompt""": """a cat and a frog""",
            """token_indices""": [2, 5],
            """generator""": generator,
            """num_inference_steps""": 1,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
            """max_iter_to_alter""": 2,
            """thresholds""": {0: 0.7},
        }
        return inputs
    def _UpperCAmelCase ( self ) -> Union[str, Any]:
        # End-to-end CPU inference against a pinned expected image slice.
        A = """cpu"""
        A = self.get_dummy_components()
        A = self.pipeline_class(**a__ )
        pipe.to(a__ )
        pipe.set_progress_bar_config(disable=a__ )
        A = self.get_dummy_inputs(a__ )
        A = pipe(**a__ ).images
        A = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 64, 64, 3) )
        A = np.array(
            [0.63_90_53_64, 0.62_89_73_07, 0.48_59_90_17, 0.5_13_36_24, 0.5_55_00_48, 0.45_76_95_16, 0.50_32_69_73, 0.5_02_31_39, 0.45_38_44_96] )
        A = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(a__ , 1e-3 )
    def _UpperCAmelCase ( self ) -> List[Any]:
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
    def _UpperCAmelCase ( self ) -> Dict:
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def _UpperCAmelCase ( self ) -> Any:
        self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 )
    def _UpperCAmelCase ( self ) -> Optional[Any]:
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
    def _UpperCAmelCase ( self ) -> str:
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
    def _UpperCAmelCase ( self ) -> int:
        super().test_save_load_local(expected_max_difference=5e-4 )
    def _UpperCAmelCase ( self ) -> Optional[Any]:
        super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class _UpperCamelCase ( unittest.TestCase ):
    """Slow GPU integration test for attend-and-excite against a reference image.

    NOTE(review): locals mangled to ``A`` as in the fast tests above — left
    byte-identical; only documentation added.
    """
    @classmethod
    def _UpperCAmelCase ( cls ) -> Tuple:
        super().setUpClass()
        torch.use_deterministic_algorithms(a__ )
    @classmethod
    def _UpperCAmelCase ( cls ) -> Dict:
        super().tearDownClass()
        torch.use_deterministic_algorithms(a__ )
    def _UpperCAmelCase ( self ) -> Optional[Any]:
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def _UpperCAmelCase ( self ) -> int:
        # Full fp16 pipeline run compared to a hosted reference output.
        A = torch.manual_seed(51 )
        A = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            """CompVis/stable-diffusion-v1-4""" , safety_checker=a__ , torch_dtype=torch.floataa )
        pipe.to("""cuda""" )
        A = """a painting of an elephant with glasses"""
        A = [5, 7]
        A = pipe(
            prompt=a__ , token_indices=a__ , guidance_scale=7.5 , generator=a__ , num_inference_steps=5 , max_iter_to_alter=5 , output_type="""numpy""" , ).images[0]
        A = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy""" )
        assert np.abs((expected_image - image).max() ) < 5e-1
| 641 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure for the OPT model family: submodules are only
# imported when first accessed, and framework-specific symbols are added
# only when the corresponding backend (torch / TF / Flax) is installed.
A__ = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch backend symbols.
    A__ = [
        '''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''OPTForCausalLM''',
        '''OPTModel''',
        '''OPTPreTrainedModel''',
        '''OPTForSequenceClassification''',
        '''OPTForQuestionAnswering''',
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # TensorFlow backend symbols.
    A__ = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Flax backend symbols.
    A__ = [
        '''FlaxOPTForCausalLM''',
        '''FlaxOPTModel''',
        '''FlaxOPTPreTrainedModel''',
    ]
# Static type checkers see the real imports; at runtime a _LazyModule is
# substituted so nothing heavy is imported until attribute access.
if TYPE_CHECKING:
    from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_opt import (
            OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OPTForCausalLM,
            OPTForQuestionAnswering,
            OPTForSequenceClassification,
            OPTModel,
            OPTPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
    import sys
    A__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 219 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
# Canonical checkpoint name -> hosted config URL map for MarkupLM.
A__ = {
    '''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
    '''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class a ( __lowerCamelCase ):
    """Configuration for MarkupLM models.

    Holds the BERT-like text-encoder hyper-parameters plus the XPath
    embedding settings that encode each node's position in the HTML/XML
    tree.  (The original declared every ``__init__`` parameter with the same
    name — a syntax error — and never assigned the values onto ``self``;
    parameter names are reconstructed from the assignment order.)
    """

    __lowerCAmelCase : Optional[int] = """markuplm"""

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        """Store all hyper-parameters and forward token ids to the base config."""
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties (XPath tree-position embeddings)
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 219 | 1 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class lowerCAmelCase_ ( lowercase ):
    """Image-classification task template.

    Declares the expected input/label schema and how dataset columns map to
    the task's canonical ``image``/``labels`` fields.  (The original declared
    all five class fields with the same name so they shadowed one another,
    checked ``isinstance`` against the argument itself, and defined both
    methods under one name; field and method names are reconstructed from
    how the method bodies use them.)
    """

    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template with the label schema taken from *features*.

        Raises:
            ValueError: if the label column is missing or is not a ClassLabel.
        """
        if self.label_column not in features:
            raise ValueError(f"""Column {self.label_column} is not present in features.""")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # Frozen dataclass: bypass __setattr__ via __dict__.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map the configured dataset columns to the task's canonical names."""
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger for progress messages during checkpoint conversion.
UpperCamelCase_ = logging.get_logger(__name__)
def _UpperCAmelCase(state_dict, encoder_only=False):
    """Translate original SegFormer checkpoint keys to HF ``Segformer*`` names.

    Args:
        state_dict: the original checkpoint's parameter mapping.
        encoder_only: when True, prefix non-head keys with
            ``segformer.encoder.`` (MiT image-classification checkpoints).

    Returns:
        OrderedDict with renamed keys and unchanged values.

    (The original declared both parameters with the same name — a syntax
    error — and never rebound the renamed key, so it returned an undefined
    name.)
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head"):
            key = "segformer.encoder." + key
        if key.startswith("backbone"):
            key = key.replace("backbone", "segformer.encoder")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm") + len("segformer.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if key.startswith("head"):
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def _UpperCAmelCase(state_dict, config):
    """Split each fused ``kv`` projection into separate key/value tensors, in place.

    The original SegFormer implementation stores keys and values as one
    matrix; HF expects ``attention.self.key`` and ``attention.self.value``.
    The first ``hidden_sizes[i]`` rows are the key projection, the rest the
    value projection.  (The original declared both parameters with the same
    name and never wrote the split tensors back into the dict.)
    """
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (a single matrix originally)
            kv_weight = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[
                : config.hidden_sizes[i]
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[
                config.hidden_sizes[i] :
            ]
def _UpperCAmelCase():
    """Download the standard COCO cats image used for conversion sanity checks.

    Returns a PIL image fetched over HTTP.  (The original never bound the
    URL local and passed an undefined name to ``requests.get``.)
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_segformer_checkpoint ( model_name , checkpoint_path , pytorch_dump_folder_path ):
    """Convert an original SegFormer (or encoder-only MiT) checkpoint to the
    HuggingFace format, verify the logits on a test image, and save the model
    and image processor.

    Args:
        model_name: original checkpoint name, e.g. "segformer.b0.512x512.ade.160k"
            (semantic segmentation) or "mit_b0" (encoder-only classification).
        checkpoint_path: path to the original ``.pth`` checkpoint.
        pytorch_dump_folder_path: output folder for the converted artifacts.

    Raises:
        ValueError: if the model name or size variant is not supported.
    """
    config = SegformerConfig()
    encoder_only = False
    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    if "segformer" in model_name:
        # e.g. "segformer.b0.512x512.ade.160k" -> size variant "b0"
        size = model_name[len("segformer." ) : len("segformer." ) + 2]
        if "ade" in model_name:
            config.num_labels = 1_5_0
            filename = "ade20k-id2label.json"
            expected_shape = (1, 1_5_0, 1_2_8, 1_2_8)
        elif "city" in model_name:
            config.num_labels = 1_9
            filename = "cityscapes-id2label.json"
            expected_shape = (1, 1_9, 1_2_8, 1_2_8)
        else:
            raise ValueError(F"Model {model_name} not supported" )
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1_0_0_0
        filename = "imagenet-1k-id2label.json"
        expected_shape = (1, 1_0_0_0)
    else:
        raise ValueError(F"Model {model_name} not supported" )
    # set config attributes
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    if size == "b0":
        pass  # default config already matches the b0 variant
    elif size == "b1":
        config.hidden_sizes = [6_4, 1_2_8, 3_2_0, 5_1_2]
        config.decoder_hidden_size = 2_5_6
    elif size == "b2":
        config.hidden_sizes = [6_4, 1_2_8, 3_2_0, 5_1_2]
        config.decoder_hidden_size = 7_6_8
        config.depths = [3, 4, 6, 3]
    elif size == "b3":
        config.hidden_sizes = [6_4, 1_2_8, 3_2_0, 5_1_2]
        config.decoder_hidden_size = 7_6_8
        config.depths = [3, 4, 1_8, 3]
    elif size == "b4":
        config.hidden_sizes = [6_4, 1_2_8, 3_2_0, 5_1_2]
        config.decoder_hidden_size = 7_6_8
        config.depths = [3, 8, 2_7, 3]
    elif size == "b5":
        config.hidden_sizes = [6_4, 1_2_8, 3_2_0, 5_1_2]
        config.decoder_hidden_size = 7_6_8
        config.depths = [3, 6, 4_0, 3]
    else:
        raise ValueError(F"Size {size} not supported" )
    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(5_1_2, 5_1_2) , keep_ratio=False , align=False , do_random_crop=False )
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image , return_tensors="pt" ).pixel_values
    logger.info(F"Converting model {model_name}..." )
    # load original state dict (segmentation checkpoints nest it under "state_dict")
    if encoder_only:
        state_dict = torch.load(checkpoint_path , map_location=torch.device("cpu" ) )
    else:
        state_dict = torch.load(checkpoint_path , map_location=torch.device("cpu" ) )["state_dict"]
    # rename keys
    state_dict = rename_keys(state_dict , encoder_only=encoder_only )
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]
    # key and value matrices need special treatment
    read_in_k_v(state_dict , config )
    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config )
    else:
        model = SegformerForSemanticSegmentation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # forward pass
    outputs = model(pixel_values )
    logits = outputs.logits
    # set expected_slice based on model name
    # ADE20k checkpoints
    if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
            [
                [[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
                [[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
                [[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
            ] )
    elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
            [
                [[-7.5_820, -8.7_231, -8.3_215], [-8.0_600, -10.3_529, -10.0_304], [-7.5_208, -9.4_103, -9.6_239]],
                [[-12.6_918, -13.8_994, -13.7_137], [-13.3_196, -15.7_523, -15.4_789], [-12.9_343, -14.8_757, -14.9_689]],
                [[-11.1_911, -11.9_421, -11.3_243], [-11.3_342, -13.6_839, -13.3_581], [-10.3_909, -12.1_832, -12.4_858]],
            ] )
    elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
            [
                [[-11.8_173, -14.3_850, -16.3_128], [-14.5_648, -16.5_804, -18.6_568], [-14.7_223, -15.7_387, -18.4_218]],
                [[-15.7_290, -17.9_171, -19.4_423], [-18.3_105, -19.9_448, -21.4_661], [-17.9_296, -18.6_497, -20.7_910]],
                [[-15.0_783, -17.0_336, -18.2_789], [-16.8_771, -18.6_870, -20.1_612], [-16.2_454, -17.1_426, -19.5_055]],
            ] )
    elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
            [
                [[-9.0_878, -10.2_081, -10.1_891], [-9.3_144, -10.7_941, -10.9_843], [-9.2_294, -10.3_855, -10.5_704]],
                [[-12.2_316, -13.9_068, -13.6_102], [-12.9_161, -14.3_702, -14.3_235], [-12.5_233, -13.7_174, -13.7_932]],
                [[-14.6_275, -15.2_490, -14.9_727], [-14.3_400, -15.9_687, -16.2_827], [-14.1_484, -15.4_033, -15.8_937]],
            ] )
    elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
            [
                [[-12.3_144, -13.2_447, -14.0_802], [-13.3_614, -14.5_816, -15.6_117], [-13.3_340, -14.4_433, -16.2_219]],
                [[-19.2_781, -20.4_128, -20.7_506], [-20.6_153, -21.6_566, -22.0_998], [-19.9_800, -21.0_430, -22.1_494]],
                [[-18.8_739, -19.7_804, -21.1_834], [-20.1_233, -21.6_765, -23.2_944], [-20.0_315, -21.2_641, -23.6_944]],
            ] )
    elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
            [
                [[-9.5_524, -12.0_835, -11.7_348], [-10.5_229, -13.6_446, -14.5_662], [-9.5_842, -12.8_851, -13.9_414]],
                [[-15.3_432, -17.5_323, -17.0_818], [-16.3_330, -18.9_255, -19.2_101], [-15.1_340, -17.7_848, -18.3_971]],
                [[-12.6_072, -14.9_486, -14.6_631], [-13.7_629, -17.0_907, -17.7_745], [-12.7_899, -16.1_695, -17.1_671]],
            ] )
    # Cityscapes checkpoints
    elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-11.9_295, -13.4_057, -14.8_106], [-13.3_431, -14.8_179, -15.3_781], [-14.2_836, -15.5_942, -16.1_588]],
                [[-11.4_906, -12.8_067, -13.6_564], [-13.1_189, -14.0_500, -14.1_543], [-13.8_748, -14.5_136, -14.8_789]],
                [[0.5_374, 0.1_067, -0.4_742], [0.1_141, -0.2_255, -0.7_099], [-0.3_000, -0.5_924, -1.3_105]],
            ] )
    elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-7.8_217, -9.8_767, -10.1_717], [-9.4_438, -10.9_058, -11.4_047], [-9.7_939, -12.3_495, -12.1_079]],
                [[-7.1_514, -9.5_336, -10.0_860], [-9.7_776, -11.6_822, -11.8_439], [-10.1_411, -12.7_655, -12.8_972]],
                [[0.3_021, 0.0_805, -0.2_310], [-0.0_328, -0.1_605, -0.2_714], [-0.1_408, -0.5_477, -0.6_976]],
            ] )
    elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
            [
                [
                    [-1.13_72e01, -1.27_87e01, -1.34_77e01],
                    [-1.25_36e01, -1.41_94e01, -1.44_09e01],
                    [-1.32_17e01, -1.48_88e01, -1.53_27e01],
                ],
                [
                    [-1.47_91e01, -1.71_22e01, -1.82_77e01],
                    [-1.71_63e01, -1.91_92e01, -1.95_33e01],
                    [-1.78_97e01, -1.99_91e01, -2.03_15e01],
                ],
                [
                    [7.67_23e-01, 4.19_21e-01, -7.78_78e-02],
                    [4.77_72e-01, 9.55_57e-03, -2.80_82e-01],
                    [3.60_32e-01, -2.48_26e-01, -5.11_68e-01],
                ],
            ] )
    elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
            [
                [[-9.4_959, -11.3_087, -11.7_479], [-11.0_025, -12.6_540, -12.3_319], [-11.4_064, -13.0_487, -12.9_905]],
                [[-9.8_905, -11.3_084, -12.0_854], [-11.1_726, -12.7_698, -12.9_583], [-11.5_985, -13.3_278, -14.1_774]],
                [[0.2_213, 0.0_192, -0.2_466], [-0.1_731, -0.4_213, -0.4_874], [-0.3_126, -0.6_541, -1.1_389]],
            ] )
    elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
                [[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
                [[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
            ] )
    elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-16.0_976, -16.4_856, -17.3_962], [-16.6_234, -19.0_342, -19.7_685], [-16.0_900, -18.0_661, -19.1_180]],
                [[-18.4_750, -18.8_488, -19.5_074], [-19.4_030, -22.1_570, -22.5_977], [-19.1_191, -20.8_486, -22.3_783]],
                [[-4.5_178, -5.5_037, -6.5_109], [-5.0_884, -7.2_174, -8.0_334], [-4.4_156, -5.8_117, -7.2_970]],
            ] )
    elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-14.2_081, -14.4_732, -14.1_977], [-14.5_867, -16.4_423, -16.6_356], [-13.4_441, -14.9_685, -16.8_696]],
                [[-14.4_576, -14.7_073, -15.0_451], [-15.0_816, -17.6_237, -17.9_873], [-14.4_213, -16.0_199, -18.5_992]],
                [[-4.7_349, -4.9_588, -5.0_966], [-4.3_210, -6.9_325, -7.2_591], [-3.4_312, -4.7_484, -7.1_917]],
            ] )
    elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-11.7_737, -11.9_526, -11.3_273], [-13.6_692, -14.4_574, -13.8_878], [-13.8_937, -14.6_924, -15.9_345]],
                [[-14.6_706, -14.5_330, -14.1_306], [-16.1_502, -16.8_180, -16.4_269], [-16.8_338, -17.8_939, -20.1_746]],
                [[1.0_491, 0.8_289, 1.0_310], [1.1_044, 0.5_219, 0.8_055], [1.0_899, 0.6_926, 0.5_590]],
            ] )
    elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-12.5_641, -13.4_777, -13.0_684], [-13.9_587, -15.8_983, -16.6_557], [-13.3_109, -15.7_350, -16.3_141]],
                [[-14.7_074, -15.4_352, -14.5_944], [-16.6_353, -18.1_663, -18.6_120], [-15.1_702, -18.0_329, -18.1_547]],
                [[-1.7_990, -2.0_951, -1.7_784], [-2.6_397, -3.8_245, -3.9_686], [-1.5_264, -2.8_126, -2.9_316]],
            ] )
    else:
        # encoder-only (MiT) checkpoint: no reference slice, just report the top class
        predicted_class_idx = logits.argmax(-1 ).item()
        print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
    # verify logits against the reference slice for segmentation checkpoints
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1e-2 )
    # finally, save model and image processor
    logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the SegFormer conversion.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="segformer.b0.512x512.ade.160k",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 611 | 0 |
def A ( lowercase__ : str ) -> bool:
    """Return True iff every character of ``lowercase__`` occurs at most once.

    Uses an arbitrary-precision int as a bitset keyed by code point, so it
    works for any Unicode string with O(1) extra objects.
    """
    bitmap = 0
    for ch in lowercase__:
        ch_unicode = ord(ch )
        ch_bit_index_on = 1 << ch_unicode  # bit representing this code point
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod() | 720 |
from __future__ import annotations
def A ( lowercase__ : list[int] ) -> int:
    """Return the maximum sum of non-adjacent elements of ``lowercase__``.

    Empty input yields 0; an all-negative input also yields 0 (selecting no
    element is allowed, since ``max_excluding`` starts at 0).
    """
    if not lowercase__:
        return 0
    # best sum that includes / excludes the current element
    max_including = lowercase__[0]
    max_excluding = 0
    for num in lowercase__[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod() | 383 | 0 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
_A = logging.get_logger(__name__) # pylint: disable=invalid-name
class _lowerCAmelCase ( __a ):
    """Speech-to-image diffusion pipeline.

    Transcribes input audio with a Whisper speech model, then uses the
    transcription as the text prompt of a Stable-Diffusion-style generation
    loop (CLIP text encoder + UNet denoising + VAE decoding).

    NOTE(review): every local variable in this class was mangled to
    ``lowerCAmelCase_`` and every parameter to ``_UpperCamelCase``, so the
    bodies below reference names (``safety_checker``, ``slice_size``,
    ``height``, ``batch_size``, ...) that the mangled signatures no longer
    bind — restore the original names before running this code.
    """
    def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Tuple:
        """Register all sub-models on the pipeline.

        NOTE(review): from the ``register_modules`` call below the nine
        parameters are, in order: speech_model, speech_processor, vae,
        text_encoder, tokenizer, unet, scheduler, safety_checker,
        feature_extractor — confirm against the upstream community pipeline.
        """
        super().__init__()
        # warn loudly when the safety checker has been explicitly disabled
        if safety_checker is None:
            logger.warning(
                f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
        self.register_modules(
            speech_model=_UpperCamelCase , speech_processor=_UpperCamelCase , vae=_UpperCamelCase , text_encoder=_UpperCamelCase , tokenizer=_UpperCamelCase , unet=_UpperCamelCase , scheduler=_UpperCamelCase , feature_extractor=_UpperCamelCase , )
    def __a ( self , _UpperCamelCase = "auto" ) -> Dict:
        """Enable sliced attention; "auto" halves the attention head dim to
        trade speed for memory."""
        if slice_size == "auto":
            lowerCAmelCase_ = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(_UpperCamelCase )
    def __a ( self ) -> List[str]:
        """Disable attention slicing (re-enables full attention).

        NOTE(review): upstream passes ``None`` here; the mangled argument
        must resolve to ``None`` — verify.
        """
        self.enable_attention_slicing(_UpperCamelCase )
    @torch.no_grad()
    def __call__( self , _UpperCamelCase , _UpperCamelCase=16_000 , _UpperCamelCase = 512 , _UpperCamelCase = 512 , _UpperCamelCase = 50 , _UpperCamelCase = 7.5 , _UpperCamelCase = None , _UpperCamelCase = 1 , _UpperCamelCase = 0.0 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = "pil" , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = 1 , **_UpperCamelCase , ) -> str:
        """Run the full audio -> transcription -> image generation loop.

        Steps: (1) transcribe the audio with Whisper, (2) encode the prompt
        (and, for classifier-free guidance, an unconditional prompt), (3)
        iteratively denoise latents with the UNet + scheduler, (4) decode
        the latents with the VAE and optionally convert to PIL.
        """
        # 1) transcribe audio to a text prompt
        lowerCAmelCase_ = self.speech_processor.feature_extractor(
            _UpperCamelCase , return_tensors="pt" , sampling_rate=_UpperCamelCase ).input_features.to(self.device )
        lowerCAmelCase_ = self.speech_model.generate(_UpperCamelCase , max_length=480_000 )
        lowerCAmelCase_ = self.speech_processor.tokenizer.batch_decode(_UpperCamelCase , skip_special_tokens=_UpperCamelCase , normalize=_UpperCamelCase )[
            0
        ]
        # 2) validate inputs and derive batch size from the prompt
        if isinstance(_UpperCamelCase , _UpperCamelCase ):
            lowerCAmelCase_ = 1
        elif isinstance(_UpperCamelCase , _UpperCamelCase ):
            lowerCAmelCase_ = len(_UpperCamelCase )
        else:
            raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(_UpperCamelCase )}""" )
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(_UpperCamelCase , _UpperCamelCase ) or callback_steps <= 0)
        ):
            raise ValueError(
                f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                f""" {type(_UpperCamelCase )}.""" )
        # get prompt text embeddings
        lowerCAmelCase_ = self.tokenizer(
            _UpperCamelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
        lowerCAmelCase_ = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            # warn about (and drop) tokens beyond CLIP's maximum sequence length
            lowerCAmelCase_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
            lowerCAmelCase_ = text_input_ids[:, : self.tokenizer.model_max_length]
        lowerCAmelCase_ = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = text_embeddings.shape
        lowerCAmelCase_ = text_embeddings.repeat(1 , _UpperCamelCase , 1 )
        lowerCAmelCase_ = text_embeddings.view(bs_embed * num_images_per_prompt , _UpperCamelCase , -1 )
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        lowerCAmelCase_ = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            # NOTE(review): the bare `42` looks like a lost type annotation
            # (upstream declares `uncond_tokens: List[str]` here) — verify.
            lowerCAmelCase_ = 42
            if negative_prompt is None:
                lowerCAmelCase_ = [""] * batch_size
            elif type(_UpperCamelCase ) is not type(_UpperCamelCase ):
                raise TypeError(
                    f"""`negative_prompt` should be the same type to `prompt`, but got {type(_UpperCamelCase )} !="""
                    f""" {type(_UpperCamelCase )}.""" )
            elif isinstance(_UpperCamelCase , _UpperCamelCase ):
                lowerCAmelCase_ = [negative_prompt]
            elif batch_size != len(_UpperCamelCase ):
                raise ValueError(
                    f"""`negative_prompt`: {negative_prompt} has batch size {len(_UpperCamelCase )}, but `prompt`:"""
                    f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
                    " the batch size of `prompt`." )
            else:
                lowerCAmelCase_ = negative_prompt
            lowerCAmelCase_ = text_input_ids.shape[-1]
            lowerCAmelCase_ = self.tokenizer(
                _UpperCamelCase , padding="max_length" , max_length=_UpperCamelCase , truncation=_UpperCamelCase , return_tensors="pt" , )
            lowerCAmelCase_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            lowerCAmelCase_ = uncond_embeddings.shape[1]
            lowerCAmelCase_ = uncond_embeddings.repeat(1 , _UpperCamelCase , 1 )
            lowerCAmelCase_ = uncond_embeddings.view(batch_size * num_images_per_prompt , _UpperCamelCase , -1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            lowerCAmelCase_ = torch.cat([uncond_embeddings, text_embeddings] )
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        lowerCAmelCase_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        lowerCAmelCase_ = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                lowerCAmelCase_ = torch.randn(_UpperCamelCase , generator=_UpperCamelCase , device="cpu" , dtype=_UpperCamelCase ).to(
                    self.device )
            else:
                lowerCAmelCase_ = torch.randn(_UpperCamelCase , generator=_UpperCamelCase , device=self.device , dtype=_UpperCamelCase )
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
            lowerCAmelCase_ = latents.to(self.device )
        # set timesteps
        self.scheduler.set_timesteps(_UpperCamelCase )
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        lowerCAmelCase_ = self.scheduler.timesteps.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        lowerCAmelCase_ = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        lowerCAmelCase_ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        lowerCAmelCase_ = {}
        if accepts_eta:
            lowerCAmelCase_ = eta
        # 3) denoising loop
        for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ):
            # expand the latents if we are doing classifier free guidance
            lowerCAmelCase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            lowerCAmelCase_ = self.scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
            # predict the noise residual
            lowerCAmelCase_ = self.unet(_UpperCamelCase , _UpperCamelCase , encoder_hidden_states=_UpperCamelCase ).sample
            # perform guidance
            if do_classifier_free_guidance:
                lowerCAmelCase_ , lowerCAmelCase_ = noise_pred.chunk(2 )
                lowerCAmelCase_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            lowerCAmelCase_ = self.scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
        # 4) decode latents with the VAE (1/0.18215 undoes SD's latent scaling)
        lowerCAmelCase_ = 1 / 0.18215 * latents
        lowerCAmelCase_ = self.vae.decode(_UpperCamelCase ).sample
        lowerCAmelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        lowerCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            lowerCAmelCase_ = self.numpy_to_pil(_UpperCamelCase )
        if not return_dict:
            return image
        return StableDiffusionPipelineOutput(images=_UpperCamelCase , nsfw_content_detected=_UpperCamelCase )
| 290 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
_A = logging.get_logger(__name__)
# NOTE(review): this reassignment clobbers the logger bound to `_A` just above —
# upstream binds these to two distinct names (`logger` and
# `IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP`); confirm before relying on either.
_A = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}
class _lowerCAmelCase ( __a ):
    """Configuration for the ImageGPT model.

    Stores the GPT-2-style hyperparameters (positions, embedding width, layer
    and head counts, dropouts) used to instantiate an ImageGPT model.
    """

    # identifier used by the auto classes
    model_type = "imagegpt"
    # cached generation state is not compared at inference time
    keys_to_ignore_at_inference = ["past_key_values"]
    # map common config attribute names onto the GPT-style names below
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # 512 color clusters + 1 start-of-sequence token
        n_positions=32 * 32,  # one position per pixel of a 32x32 image
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,  # None -> framework default of 4 * n_embd
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        tie_word_embeddings=False,
        **kwargs,
    ) -> None:
        """Store the hyperparameters and forward the rest to the base config."""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
class _lowerCAmelCase ( __a ):
    """ONNX export configuration for ImageGPT."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Declare the exported model inputs and their dynamic axes."""
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        """Build a batch of random dummy images and run them through the
        preprocessor to obtain inputs suitable for ONNX export tracing."""
        input_images = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_images, return_tensors=framework))
        return inputs
| 290 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy-import scaffolding: submodules are only imported when their optional
# backend (vision / torch / tf / flax) is actually installed and first accessed.
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; mirrors _import_structure above.
    from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_vit import ViTFeatureExtractor
        from .image_processing_vit import ViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit import (
            VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
            ViTModel,
            ViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy that loads submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 716 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    """Convert an original XLM checkpoint to the HuggingFace format.

    Writes three files into ``pytorch_dump_folder_path``: the model weights,
    the JSON config and the JSON vocab.

    Args:
        xlm_checkpoint_path: path to the official XLM ``.pth`` dump.
        pytorch_dump_folder_path: output folder for the converted files.
    """
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    # keep only JSON-serializable hyperparameters (drop tensor-valued entries)
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    # strip BPE continuation markers "@@"; non-special tokens (index > 13)
    # without a marker get the "</w>" end-of-word suffix
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    # bug fix: the original message reported the config path instead of the vocab path
    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
    # CLI entry point for the XLM checkpoint conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 333 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
    """Common-test harness for the TF MobileBERT model family.

    NOTE(review): the obfuscated source collapsed all four class attributes
    into one name and renamed the ``_prepare_for_class`` override; the names
    below are reconstructed from the upstream transformers test suite —
    verify against tests/models/mobilebert/test_modeling_tf_mobilebert.py.
    """

    # every TF MobileBERT head exercised by the shared model tests
    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    # pipeline task -> model class mapping used by the pipeline tests
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Extend the common input preparation: pretraining models additionally
        need a next-sentence label tensor."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict
class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=9_9 , lowercase_=3_2 , lowercase_=3_2 , lowercase_=2 , lowercase_=4 , lowercase_=3_7 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=5_1_2 , lowercase_=1_6 , lowercase_=2 , lowercase_=0.0_2 , lowercase_=3 , lowercase_=4 , lowercase_=None , ) -> Any:
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_input_mask
UpperCAmelCase = use_token_type_ids
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = num_labels
UpperCAmelCase = num_choices
UpperCAmelCase = scope
UpperCAmelCase = embedding_size
def a_ ( self ) -> Optional[int]:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase = None
if self.use_input_mask:
UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase = None
if self.use_token_type_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a_(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """Check TFMobileBertModel output shapes for dict, list, and tensor inputs.

    The mangled original declared seven identically-named parameters
    (a SyntaxError) and discarded every intermediate result.
    """
    model = TFMobileBertModel(config=config)
    inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
    result = model(inputs)

    inputs = [input_ids, input_mask]
    result = model(inputs)

    result = model(input_ids)

    self.parent.assertEqual(
        result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def a_(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """Check TFMobileBertForMaskedLM logits shape (batch, seq, vocab)."""
    model = TFMobileBertForMaskedLM(config=config)
    inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
    result = model(inputs)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def a_(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """Check TFMobileBertForNextSentencePrediction logits shape (batch, 2)."""
    model = TFMobileBertForNextSentencePrediction(config=config)
    inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
    result = model(inputs)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
def a_(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """Check TFMobileBertForPreTraining prediction and seq-relationship logits shapes."""
    model = TFMobileBertForPreTraining(config=config)
    inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
    result = model(inputs)
    self.parent.assertEqual(
        result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
def a_(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """Check TFMobileBertForSequenceClassification logits shape (batch, num_labels)."""
    config.num_labels = self.num_labels
    model = TFMobileBertForSequenceClassification(config=config)
    inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
    result = model(inputs)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def a_(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """Check TFMobileBertForMultipleChoice logits shape (batch, num_choices).

    Tiles each input along a new choice dimension before the forward pass.
    """
    config.num_choices = self.num_choices
    model = TFMobileBertForMultipleChoice(config=config)
    multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
    multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
    multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
    inputs = {
        "input_ids": multiple_choice_inputs_ids,
        "attention_mask": multiple_choice_input_mask,
        "token_type_ids": multiple_choice_token_type_ids,
    }
    result = model(inputs)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def a_(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """Check TFMobileBertForTokenClassification logits shape (batch, seq, num_labels)."""
    config.num_labels = self.num_labels
    model = TFMobileBertForTokenClassification(config=config)
    inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
    result = model(inputs)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def a_(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """Check TFMobileBertForQuestionAnswering start/end logits shapes (batch, seq)."""
    model = TFMobileBertForQuestionAnswering(config=config)
    inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
    result = model(inputs)
    self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
    self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def a_(self):
    """Turn the full config-and-inputs tuple into (config, inputs_dict) for common tests.

    The mangled original unpacked into seven identical names and then built
    the dict from undefined identifiers.
    """
    config_and_inputs = self.prepare_config_and_inputs()
    (
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ) = config_and_inputs
    inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
    return config, inputs_dict
def a_(self):
    """Set up the model tester and config tester shared by the test methods.

    The mangled original discarded both objects into throwaway locals, so
    `self.model_tester` / `self.config_tester` were never set.
    """
    self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
    # NOTE(review): the original passed an undefined name as config_class;
    # MobileBertConfig is the evident intent — confirm against upstream.
    self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)
def a_(self):
    """Run the shared PretrainedConfig sanity checks."""
    self.config_tester.run_common_tests()
def a_(self):
    """Exercise the base-model check; the original passed an undefined name to the check."""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)
def a_(self):
    """Exercise the masked-LM check; the original passed an undefined name to the check."""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)
def a_(self):
    """Exercise the multiple-choice check; the original passed an undefined name to the check."""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)
def a_(self):
    """Exercise the next-sentence-prediction check; the original passed an undefined name."""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)
def a_(self):
    """Exercise the pretraining check; the original passed an undefined name to the check."""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)
def a_(self):
    """Exercise the question-answering check; the original passed an undefined name."""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)
def a_(self):
    """Exercise the sequence-classification check; the original passed an undefined name."""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)
def a_(self):
    """Exercise the token-classification check; the original passed an undefined name."""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
@slow
def a_(self):
    """Smoke-test that a pretrained checkpoint loads (loop variable was dropped in the mangled original)."""
    # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
    for model_name in ["google/mobilebert-uncased"]:
        model = TFMobileBertModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
@require_tf
class _UpperCAmelCase(unittest.TestCase):
    """Slow integration test comparing pretraining logits against reference values."""

    @slow
    def a_(self):
        """Run google/mobilebert-uncased on a fixed input and compare a logits slice."""
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)

        # Reference values recorded from the released checkpoint.
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 373 |
"""simple docstring"""
import os
import time
import numpy as np
import onnxruntime as ort
SCREAMING_SNAKE_CASE_ = '''1'''
SCREAMING_SNAKE_CASE_ = '''0'''
SCREAMING_SNAKE_CASE_ = '''1'''
SCREAMING_SNAKE_CASE_ = ort.SessionOptions()
SCREAMING_SNAKE_CASE_ = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('''Create inference session...''')
SCREAMING_SNAKE_CASE_ = ['''TensorrtExecutionProvider''', '''CUDAExecutionProvider''']
SCREAMING_SNAKE_CASE_ = ort.InferenceSession('''model.onnx''', sess_options=sess_opt, providers=execution_provider)
SCREAMING_SNAKE_CASE_ = ort.RunOptions()
SCREAMING_SNAKE_CASE_ = 128
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = np.ones((batch, sequence), dtype=np.intaa)
SCREAMING_SNAKE_CASE_ = np.ones((batch, sequence), dtype=np.intaa)
SCREAMING_SNAKE_CASE_ = np.ones((batch, sequence), dtype=np.intaa)
print('''Warm up phase...''')
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Start inference...''')
SCREAMING_SNAKE_CASE_ = time.time()
SCREAMING_SNAKE_CASE_ = 2000
SCREAMING_SNAKE_CASE_ = {}
for iter in range(max_iters):
SCREAMING_SNAKE_CASE_ = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Average Inference Time = {:.3f} ms'''.format((time.time() - start_time) * 1000 / max_iters))
| 373 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both module-level names below were mangled to the same
# identifier, so the archive map immediately shadows the logger. Nothing in
# the visible file reads either name; restore distinct names (a `logger` and
# a *_PRETRAINED_CONFIG_ARCHIVE_MAP constant) once the originals are known.
UpperCamelCase = logging.get_logger(__name__)
# Map from checkpoint name to the URL of its hosted config.json.
UpperCamelCase = {
    """tanreinama/GPTSAN-2.8B-spout_is_uniform""": (
        """https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"""
    ),
}
class lowercase_(PretrainedConfig):
    """Configuration for the GPTSAN-japanese model (mangled class name preserved).

    Stores the hyper-parameters of the switch-transformer-style architecture
    (switch layers plus optional extension layers) and its router settings.
    The mangled original declared every __init__ parameter with the same name
    (a SyntaxError) and assigned values to a local instead of `self`.
    """

    # PretrainedConfig hooks — the original bound all three to one class
    # attribute name, so `model_type` and the others were never set.
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=36000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35998,
        pad_token_id=35995,
        eos_token_id=35999,
        **kwargs,
    ):
        """Store all hyper-parameters and forward special token ids to the base class."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        # Total depth is the switch stack plus the extension stack.
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
| 612 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

# Ordered mapping from model type to the name of its image processor class.
# The functions below read `logger`, `IMAGE_PROCESSOR_MAPPING_NAMES`, and
# `IMAGE_PROCESSOR_MAPPING`; the mangled original bound all three objects to
# a single reused name, leaving those lookups undefined.
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
    [
        ("align", "EfficientNetImageProcessor"),
        ("beit", "BeitImageProcessor"),
        ("bit", "BitImageProcessor"),
        ("blip", "BlipImageProcessor"),
        ("blip-2", "BlipImageProcessor"),
        ("bridgetower", "BridgeTowerImageProcessor"),
        ("chinese_clip", "ChineseCLIPImageProcessor"),
        ("clip", "CLIPImageProcessor"),
        ("clipseg", "ViTImageProcessor"),
        ("conditional_detr", "ConditionalDetrImageProcessor"),
        ("convnext", "ConvNextImageProcessor"),
        ("convnextv2", "ConvNextImageProcessor"),
        ("cvt", "ConvNextImageProcessor"),
        ("data2vec-vision", "BeitImageProcessor"),
        ("deformable_detr", "DeformableDetrImageProcessor"),
        ("deit", "DeiTImageProcessor"),
        ("deta", "DetaImageProcessor"),
        ("detr", "DetrImageProcessor"),
        ("dinat", "ViTImageProcessor"),
        ("donut-swin", "DonutImageProcessor"),
        ("dpt", "DPTImageProcessor"),
        ("efficientformer", "EfficientFormerImageProcessor"),
        ("efficientnet", "EfficientNetImageProcessor"),
        ("flava", "FlavaImageProcessor"),
        ("focalnet", "BitImageProcessor"),
        ("git", "CLIPImageProcessor"),
        ("glpn", "GLPNImageProcessor"),
        ("groupvit", "CLIPImageProcessor"),
        ("imagegpt", "ImageGPTImageProcessor"),
        ("instructblip", "BlipImageProcessor"),
        ("layoutlmv2", "LayoutLMv2ImageProcessor"),
        ("layoutlmv3", "LayoutLMv3ImageProcessor"),
        ("levit", "LevitImageProcessor"),
        ("mask2former", "Mask2FormerImageProcessor"),
        ("maskformer", "MaskFormerImageProcessor"),
        ("mgp-str", "ViTImageProcessor"),
        ("mobilenet_v1", "MobileNetV1ImageProcessor"),
        ("mobilenet_v2", "MobileNetV2ImageProcessor"),
        ("mobilevit", "MobileViTImageProcessor"),  # duplicate entry removed
        ("mobilevitv2", "MobileViTImageProcessor"),
        ("nat", "ViTImageProcessor"),
        ("oneformer", "OneFormerImageProcessor"),
        ("owlvit", "OwlViTImageProcessor"),
        ("perceiver", "PerceiverImageProcessor"),
        ("pix2struct", "Pix2StructImageProcessor"),
        ("poolformer", "PoolFormerImageProcessor"),
        ("regnet", "ConvNextImageProcessor"),
        ("resnet", "ConvNextImageProcessor"),
        ("sam", "SamImageProcessor"),
        ("segformer", "SegformerImageProcessor"),
        ("swiftformer", "ViTImageProcessor"),
        ("swin", "ViTImageProcessor"),
        ("swin2sr", "Swin2SRImageProcessor"),
        ("swinv2", "ViTImageProcessor"),
        ("table-transformer", "DetrImageProcessor"),
        ("timesformer", "VideoMAEImageProcessor"),
        ("tvlt", "TvltImageProcessor"),
        ("upernet", "SegformerImageProcessor"),
        ("van", "ConvNextImageProcessor"),
        ("videomae", "VideoMAEImageProcessor"),
        ("vilt", "ViltImageProcessor"),
        ("vit", "ViTImageProcessor"),
        ("vit_hybrid", "ViTHybridImageProcessor"),
        ("vit_mae", "ViTImageProcessor"),
        ("vit_msn", "ViTImageProcessor"),
        ("xclip", "CLIPImageProcessor"),
        ("yolos", "YolosImageProcessor"),
    ]
)

IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def lowerCAmelCase(class_name: str):
    """Resolve an image-processor class name to the actual class.

    Looks the name up in the `transformers.models` sub-modules first, then in
    extra registered content, and finally in the top-level `transformers`
    module (which exposes dummy objects for missing dependencies).
    Returns None when the name cannot be resolved at all.
    """
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def lowerCAmelCase(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """Load a model's image-processor configuration (JSON), or {} when absent.

    NOTE(review): this redefinition shadows the name-lookup helper above
    because both kept the same mangled name; restore distinct names upstream.
    The mangled original declared duplicate parameter names (a SyntaxError)
    and checked an identifier it never assigned.
    """
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead.")
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class lowercase_:
    """Factory that builds the right image processor via its classmethod loader.

    (Mangled class name preserved; upstream this is `AutoImageProcessor`.)
    """

    def __init__(self):
        # Direct instantiation is disallowed; use the from_pretrained classmethod.
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.")

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def lowerCamelCase__(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate an image processor for `pretrained_model_name_or_path`.

        Resolution order: explicit `image_processor_type` in the processor
        config, a remote-code `auto_map` entry, a legacy feature-extractor
        config, then the model config / IMAGE_PROCESSOR_MAPPING.
        """
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        # NOTE(review): the mangled original lost this assignment's target;
        # upstream marks the kwargs as coming from the Auto API — confirm.
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration.")
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration.")

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type``
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            # NOTE(review): this helper name is mangled to `lowerCAmelCase`
            # elsewhere in this module — restore the original identifier.
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code)

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs)
            kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}")

    @staticmethod
    def lowerCamelCase__(config_class, image_processor_class):
        """Register a new image processor class for a config class.

        NOTE(review): the mangled name collides with the classmethod above and
        overrides it in the class namespace; upstream this is `register`.
        """
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
| 612 | 1 |
'''simple docstring'''
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
# Module logger — the mangled original bound it to a throwaway name while the
# mapping functions below call `logger.info(...)`.
logger = logging.get_logger(__name__)
class _snake_case:
    # Holds the active joblib backend name; None means "use multiprocessing".
    # NOTE(review): the functions below read `ParallelBackendConfig.backend_name`,
    # but the class and attribute names here were mangled — the original
    # identifiers must be restored for those lookups to succeed.
    __snake_case: Optional[Any] = None
@experimental
def _A(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Map `single_map_nested_func` over `iterable`, using multiprocessing or joblib.

    Dispatches to the joblib path when a parallel backend has been configured,
    otherwise to a plain multiprocessing Pool. The mangled original declared
    seven identically-named parameters (a SyntaxError).
    """
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)

    # NOTE(review): both helper names below are mangled to `_A` in this module;
    # restore the original identifiers for these calls to resolve.
    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)
def _A(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Split `iterable` into contiguous chunks and map them in a multiprocessing Pool.

    Each worker receives one (function, slice, types, index, disable_tqdm, desc)
    tuple; the per-worker results are flattened before returning.
    """
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}")

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}")
    initargs, initializer = None, None
    if not disable_tqdm:
        # Share tqdm's lock across workers so progress bars do not interleave.
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")
    return mapped
def _A(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Map `single_map_nested_func` over `iterable` with joblib on the configured backend.

    Progress display is not supported on this path, hence the fixed
    (None, True, None) tail in each task tuple.
    """
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )
@experimental
@contextlib.contextmanager
def _A(backend_name):
    """Context manager that routes parallel maps through the named joblib backend.

    Sets `ParallelBackendConfig.backend_name` on entry and always clears it on
    exit; the mangled original assigned both values to a throwaway local.
    """
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()

    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
| 531 |
'''simple docstring'''
def _A ( ):
'''simple docstring'''
A__ = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
A__ = 6
A__ = 1
A__ = 1901
A__ = 0
while year < 2001:
day += 7
if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
if day > days_per_month[month - 1] and month != 2:
month += 1
A__ = day - days_per_month[month - 2]
elif day > 29 and month == 2:
month += 1
A__ = day - 29
else:
if day > days_per_month[month - 1]:
month += 1
A__ = day - days_per_month[month - 2]
if month > 12:
year += 1
A__ = 1
if year < 2001 and day == 1:
sundays += 1
return sundays
if __name__ == "__main__":
print(solution())
| 531 | 1 |
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def lowerCamelCase_(state_dict):
    """Drop fairseq-only keys from `state_dict` in place (missing keys are ignored).

    The mangled original shadowed the parameter with the key list and then
    popped the list itself instead of each key.
    """
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def lowerCamelCase_(emb):
    """Build a bias-free Linear layer that shares its weight data with `emb`.

    The mangled original assigned both the layer and the copied weights to
    throwaway locals, so nothing was wired together.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def lowerCamelCase_(checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_aa=False):
    """Convert a fairseq mBART checkpoint on disk into MBartForConditionalGeneration.

    Args:
        checkpoint_path: path to the fairseq `model.pt`.
        hf_config_path: Hugging Face config to size the target model.
        finetuned: whether the checkpoint is a fine-tuned (task) checkpoint.
        mbart_aa: whether the checkpoint is an mBART-50 checkpoint.
    """
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    # NOTE(review): this helper is defined under a mangled name in this module — confirm.
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_aa and finetuned:
        # NOTE(review): mangled target restored from upstream — mBART-50
        # fine-tuned checkpoints use ReLU activations; confirm.
        mbart_config.activation_function = "relu"

    # NOTE(review): mangled target restored from upstream — tie the shared
    # embedding to the decoder embedding weights; confirm.
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        # NOTE(review): mangled target restored from upstream (`lm_head`) — confirm.
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""",
default="""facebook/mbart-large-cc25""",
type=str,
help="""Which huggingface architecture to use: mbart-large""",
)
parser.add_argument("""--mbart_50""", action="""store_true""", help="""whether the model is mMART-50 checkpoint""")
parser.add_argument("""--finetuned""", action="""store_true""", help="""whether the model is a fine-tuned checkpoint""")
lowercase = parser.parse_args()
lowercase = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 707 | import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both module-level names below were mangled to the same
# identifier, so the archive map shadows the logger. Nothing visible reads
# either name; restore `logger` and the *_PRETRAINED_CONFIG_ARCHIVE_MAP
# constant when the original identifiers are recovered.
lowercase = logging.get_logger(__name__)
# Map from checkpoint name to the URL of its hosted config.json.
lowercase = {
    """facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class __lowercase(A):
    """Configuration for Data2VecAudio models (mangled class/base names preserved).

    The mangled original declared every __init__ parameter with the same name
    (a SyntaxError) and assigned each value to a local `_a` instead of `self`,
    so no attribute was ever set. Parameter names below were reconstructed
    from the attribute assignments; confirm against the upstream config.
    """

    # PretrainedConfig hook; the original bound it to a mangled class attribute.
    model_type = "data2vec-audio"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        """Store all hyper-parameters and validate the convolutional feature encoder."""
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def A_(self):
        """Overall downsampling factor of the feature encoder (product of conv strides)."""
        return math.prod(self.conv_stride)
| 591 | 0 |
'''simple docstring'''
# Pinned dependency specifiers (package name -> pip requirement string).
# NOTE(review): upstream this constant is named `deps` (the transformers
# dependency version table); the mangled name has no readers in this view.
lowercase__ ={
    'Pillow': 'Pillow<10.0.0',
    'accelerate': 'accelerate>=0.20.3',
    'av': 'av==9.2.0',
    'beautifulsoup4': 'beautifulsoup4',
    'black': 'black~=23.1',
    'codecarbon': 'codecarbon==1.2.0',
    'cookiecutter': 'cookiecutter==1.7.3',
    'dataclasses': 'dataclasses',
    'datasets': 'datasets!=2.5.0',
    'decord': 'decord==0.6.0',
    'deepspeed': 'deepspeed>=0.9.3',
    'diffusers': 'diffusers',
    'dill': 'dill<0.3.5',
    'evaluate': 'evaluate>=0.2.0',
    'fairscale': 'fairscale>0.3',
    'faiss-cpu': 'faiss-cpu',
    'fastapi': 'fastapi',
    'filelock': 'filelock',
    'flax': 'flax>=0.4.1,<=0.7.0',
    'ftfy': 'ftfy',
    'fugashi': 'fugashi>=1.0',
    'GitPython': 'GitPython<3.1.19',
    'hf-doc-builder': 'hf-doc-builder>=0.3.0',
    'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
    'importlib_metadata': 'importlib_metadata',
    'ipadic': 'ipadic>=1.0.0,<2.0',
    'isort': 'isort>=5.5.4',
    'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
    'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
    'jieba': 'jieba',
    'kenlm': 'kenlm',
    'keras-nlp': 'keras-nlp>=0.3.1',
    'librosa': 'librosa',
    'nltk': 'nltk',
    'natten': 'natten>=0.14.6',
    'numpy': 'numpy>=1.17',
    'onnxconverter-common': 'onnxconverter-common',
    'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
    'onnxruntime': 'onnxruntime>=1.4.0',
    'opencv-python': 'opencv-python',
    'optuna': 'optuna',
    'optax': 'optax>=0.0.8,<=0.1.4',
    'packaging': 'packaging>=20.0',
    'parameterized': 'parameterized',
    'phonemizer': 'phonemizer',
    'protobuf': 'protobuf',
    'psutil': 'psutil',
    'pyyaml': 'pyyaml>=5.1',
    'pydantic': 'pydantic<2',
    'pytest': 'pytest>=7.2.0',
    'pytest-timeout': 'pytest-timeout',
    'pytest-xdist': 'pytest-xdist',
    'python': 'python>=3.8.0',
    'ray[tune]': 'ray[tune]',
    'regex': 'regex!=2019.12.17',
    'requests': 'requests',
    'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
    'rjieba': 'rjieba',
    'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
    'ruff': 'ruff>=0.0.241,<=0.0.259',
    'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
    'sacremoses': 'sacremoses',
    'safetensors': 'safetensors>=0.3.1',
    'sagemaker': 'sagemaker>=2.31.0',
    'scikit-learn': 'scikit-learn',
    'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
    'sigopt': 'sigopt',
    'starlette': 'starlette',
    'sudachipy': 'sudachipy>=0.6.6',
    'sudachidict_core': 'sudachidict_core>=20220729',
    'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
    'tensorflow': 'tensorflow>=2.6,<2.14',
    'tensorflow-text': 'tensorflow-text<2.14',
    'tf2onnx': 'tf2onnx',
    'timeout-decorator': 'timeout-decorator',
    'timm': 'timm',
    'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
    'torch': 'torch>=1.9,!=1.12.0',
    'torchaudio': 'torchaudio',
    'torchvision': 'torchvision',
    'pyctcdecode': 'pyctcdecode>=0.4.0',
    'tqdm': 'tqdm>=4.27',
    'unidic': 'unidic>=1.0.2',
    'unidic_lite': 'unidic_lite>=1.0.7',
    'urllib3': 'urllib3<2.0.0',
    'uvicorn': 'uvicorn',
}
| 263 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
# True when the running torch predates 1.11 (older ONNX export API).
# Fix: this flag is read below as `is_torch_less_than_1_11`, but the original
# line bound it to an unrelated name, leaving the real name undefined.
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    """Trace `model` with `model_args` and write an ONNX graph to `output_path`.

    Fixes: the original definition repeated the parameter name ``A__`` eight
    times (a SyntaxError) and was named ``UpperCamelCase_`` even though the
    converter below calls ``onnx_export``.

    Args:
        model: torch module to export.
        model_args: positional example inputs used for tracing.
        output_path: destination ``.onnx`` file; parent dirs are created.
        ordered_input_names: names of the traced inputs, in order.
        output_names: names of the graph outputs.
        dynamic_axes: mapping input name -> dynamic-axis spec.
        opset: ONNX operator-set version to target.
        use_external_data_format: store weights outside the proto; only
            forwarded on torch < 1.11, where the flag still exists.
    """
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format`
    # arguments in v1.11, so we check the torch version for backwards compatibility.
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    """Export the VAE decoder of a `diffusers` checkpoint to ONNX.

    Fixes vs. original: duplicate ``A__`` parameter names (SyntaxError), the
    dtype selector evaluated to the same dtype on both branches, and the
    function was not actually named ``convert_models`` as the CLI entry
    point expects.

    Args:
        model_path: local directory or Hub id of the diffusers checkpoint.
        output_path: directory that will receive ``vae_decoder/model.onnx``.
        opset: ONNX operator-set version.
        fp16: export in float16 (requires CUDA).

    Raises:
        ValueError: if ``fp16`` is requested but no CUDA device is available.
    """
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the VAE-decoder ONNX export.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    # Fix: the parse result must be bound to `args` (it was assigned to an
    # unrelated name), and argparse stores `--fp16` as `args.fp16`, not `args.fpaa`.
    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
| 263 | 1 |
'''simple docstring'''
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power (Project Euler 16).

    Fixes vs. original: the loop bound ``total + n % 10, n // 10`` as a tuple
    to a single name, so ``n`` never shrank (infinite loop) and the
    accumulator never advanced; the function is renamed ``solution`` because
    that is the name the ``__main__`` guard below calls.

    Args:
        power: exponent applied to 2; must be a non-negative integer.
    """
    n = 2**power
    total = 0
    while n:
        # Peel off the last decimal digit and drop it from n.
        total, n = total + n % 10, n // 10
    return total
if __name__ == "__main__":
    # Read an exponent from stdin and print the digit sum of 2**exponent.
    # NOTE(review): `solution` is not defined under that name in this snippet
    # (the function above is mangled); the call fails until it is renamed.
    print(solution(int(str(input()).strip())))
| 703 | '''simple docstring'''
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
lowercase__ : Dict = logging.get_logger(__name__)
lowercase__ : List[Any] = "T5Config"
class TFMTaModel(TFTaModel):
    """TF mT5 base model: the TF T5 architecture driven by an mT5 config.

    Fixes vs. original: the class was named identically to its (undefined)
    base and its two class attributes shared one name, so the second
    assignment silently overwrote the first.
    """

    model_type = "mt5"
    config_class = MTaConfig
class TFMTaForConditionalGeneration(TFTaForConditionalGeneration):
    """TF mT5 seq2seq LM head model: TF T5 generation driven by an mT5 config.

    Fixes vs. original: duplicate class name shadowing and duplicate class
    attribute names (the second assignment overwrote the first).
    """

    model_type = "mt5"
    config_class = MTaConfig
class TFMTaEncoderModel(TFTaEncoderModel):
    """TF mT5 encoder-only model: the TF T5 encoder driven by an mT5 config.

    Fixes vs. original: duplicate class name shadowing and duplicate class
    attribute names (the second assignment overwrote the first).
    """

    model_type = "mt5"
    config_class = MTaConfig
| 43 | 0 |
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class __a (__lowerCamelCase , unittest.TestCase):
    # NOTE(review): the base name `__lowerCamelCase` is not defined in this module —
    # presumably TokenizerTesterMixin was meant; confirm before running.
    # NOTE(review): the four class attributes below all share one mangled name, so
    # only the last assignment survives; they look like tokenizer_class,
    # rust_tokenizer_class, test_rust_tokenizer and a boolean flag originally.
    """Slow integration tests for the Barthez (moussaKam/mbarthez) tokenizers."""
    _SCREAMING_SNAKE_CASE :Union[str, Any] = BarthezTokenizer
    _SCREAMING_SNAKE_CASE :Tuple = BarthezTokenizerFast
    _SCREAMING_SNAKE_CASE :Dict = True
    _SCREAMING_SNAKE_CASE :str = True
    def _a ( self ) -> Optional[int]:
        """Download the pretrained fast tokenizer and save it into the test dir."""
        super().setUp()
        SCREAMING_SNAKE_CASE__ : List[str] = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" )
        # NOTE(review): the lines below read `tokenizer`, but the assignment above
        # went to a mangled name — both were presumably the same variable.
        tokenizer.save_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=_a )
        SCREAMING_SNAKE_CASE__ : Dict = tokenizer
    def _a ( self ) -> Tuple:
        """<pad> must round-trip to id 1 and back through the slow tokenizer."""
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = '''<pad>'''
        SCREAMING_SNAKE_CASE__ : int = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
    def _a ( self ) -> List[str]:
        """Spot-check the first/last vocabulary entries and the vocab length."""
        SCREAMING_SNAKE_CASE__ : str = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<s>""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(vocab_keys[-1] , """<mask>""" )
        self.assertEqual(len(_a ) , 101_122 )
    def _a ( self ) -> int:
        """The tokenizer must report the expected vocabulary size."""
        self.assertEqual(self.get_tokenizer().vocab_size , 101_122 )
    @require_torch
    def _a ( self ) -> List[str]:
        """Batch-encode two sentences to torch tensors; verify shapes and ids."""
        SCREAMING_SNAKE_CASE__ : int = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        SCREAMING_SNAKE_CASE__ : Tuple = [0, 57, 3_018, 70_307, 91, 2]
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer(
            _a , max_length=len(_a ) , padding=_a , truncation=_a , return_tensors="""pt""" )
        self.assertIsInstance(_a , _a )
        self.assertEqual((2, 6) , batch.input_ids.shape )
        self.assertEqual((2, 6) , batch.attention_mask.shape )
        SCREAMING_SNAKE_CASE__ : Optional[Any] = batch.input_ids.tolist()[0]
        self.assertListEqual(_a , _a )
    def _a ( self ) -> List[str]:
        """Slow and fast tokenizers must agree on tokenize() and encode()."""
        if not self.test_rust_tokenizer:
            return
        SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_tokenizer()
        SCREAMING_SNAKE_CASE__ : int = self.get_rust_tokenizer()
        SCREAMING_SNAKE_CASE__ : List[Any] = '''I was born in 92000, and this is falsé.'''
        SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.tokenize(_a )
        SCREAMING_SNAKE_CASE__ : str = rust_tokenizer.tokenize(_a )
        self.assertListEqual(_a , _a )
        SCREAMING_SNAKE_CASE__ : Dict = tokenizer.encode(_a , add_special_tokens=_a )
        SCREAMING_SNAKE_CASE__ : Tuple = rust_tokenizer.encode(_a , add_special_tokens=_a )
        self.assertListEqual(_a , _a )
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_rust_tokenizer()
        SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.encode(_a )
        SCREAMING_SNAKE_CASE__ : int = rust_tokenizer.encode(_a )
        self.assertListEqual(_a , _a )
    @slow
    def _a ( self ) -> Any:
        """Full integration check against a pinned revision of moussaKam/mbarthez."""
        # fmt: off
        SCREAMING_SNAKE_CASE__ : str = {'''input_ids''': [[0, 490, 14_328, 4_507, 354, 47, 43_669, 95, 25, 78_117, 20_215, 19_779, 190, 22, 400, 4, 35_343, 80_310, 603, 86, 24_937, 105, 33_438, 94_762, 196, 39_642, 7, 15, 15_933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10_534, 87, 25, 66, 3_358, 196, 55_289, 8, 82_961, 81, 2_204, 75_203, 7, 15, 763, 12_956, 216, 178, 14_328, 9_595, 1_377, 69_693, 7, 448, 71_021, 196, 18_106, 1_437, 13_974, 108, 9_083, 4, 49_315, 7, 39, 86, 1_326, 2_793, 46_333, 4, 448, 196, 74_588, 7, 49_315, 7, 39, 21, 822, 38_470, 74, 21, 66_723, 62_480, 8, 22_050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        # moussaKam/mbarthez is a french model. So we also use french texts.
        SCREAMING_SNAKE_CASE__ : Optional[Any] = [
            '''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
            '''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
            '''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
            '''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
            '''telles que la traduction et la synthèse de texte.''',
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=_a , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=_a , )
| 680 |
'''simple docstring'''
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class snake_case ( unittest.TestCase ):
    # NOTE(review): `lowerCamelCase` is referenced throughout the bodies below but is
    # never defined — the name-mangling collapsed several distinct locals (checkpoint
    # id, tokenizer output, temp dir, expected exception) into one placeholder.
    """Integration tests for BetterTransformer conversion round-trips on a tiny T5."""
    def lowercase__ ( self ) -> List[Any]:
        """to_bettertransformer/reverse_bettertransformer must round-trip through
        save_pretrained and produce identical generations after reload."""
        snake_case__ : List[Any] = '''hf-internal-testing/tiny-random-t5'''
        snake_case__ : int = AutoTokenizer.from_pretrained(lowerCamelCase )
        snake_case__ : int = AutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase )
        snake_case__ : Tuple = tokenizer('''This is me''' , return_tensors='''pt''' )
        snake_case__ : Dict = model.to_bettertransformer()
        # Converted model must expose BetterTransformer submodules...
        self.assertTrue(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
        snake_case__ : Optional[Any] = model.generate(**lowerCamelCase )
        snake_case__ : str = model.reverse_bettertransformer()
        # ...and lose them again after reversal.
        self.assertFalse(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(lowerCamelCase )
            snake_case__ : Tuple = AutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase )
            self.assertFalse(
                any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
            snake_case__ : Union[str, Any] = model_reloaded.generate(**lowerCamelCase )
            self.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase ) )
    def lowercase__ ( self ) -> str:
        """Saving a still-converted model must raise until it is reversed."""
        snake_case__ : Any = '''hf-internal-testing/tiny-random-t5'''
        snake_case__ : Any = AutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase )
        snake_case__ : Union[str, Any] = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            # NOTE(review): `assertRaises(lowerCamelCase)` — the expected exception type
            # was mangled away; upstream this is ValueError. Confirm before relying on it.
            with self.assertRaises(lowerCamelCase ):
                model.save_pretrained(lowerCamelCase )
            snake_case__ : List[Any] = model.reverse_bettertransformer()
            model.save_pretrained(lowerCamelCase )
| 261 | 0 |
'''simple docstring'''
import random
def partition(a: list, left_index: int, right_index: int) -> int:
    """Partition ``a[left_index:right_index]`` around ``a[left_index]`` in place.

    Elements smaller than the pivot end up to its left; the pivot's final
    index is returned. Fixes vs. original: the signature repeated one
    parameter name three times (SyntaxError) and the body referenced the
    undefined placeholder ``_A``; the function is named ``partition`` because
    that is the name ``quick_sort_random`` below calls.

    Args:
        a: list being sorted (mutated in place).
        left_index: inclusive start of the slice; ``a[left_index]`` is the pivot.
        right_index: exclusive end of the slice.

    Returns:
        The final index of the pivot element.
    """
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    # Put the pivot between the smaller and the not-smaller partitions.
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1
def quick_sort_random(a: list, left: int, right: int) -> None:
    """Sort ``a[left:right]`` in place with quicksort using a random pivot.

    Fixes vs. original: the signature repeated one parameter name three
    times (SyntaxError) and the body referenced the undefined placeholder
    ``_A``; the function is named ``quick_sort_random`` because the other
    functions in this module call it under that name.

    Args:
        a: list to sort (mutated in place).
        left: inclusive start index of the range to sort.
        right: exclusive end index of the range to sort.
    """
    if left < right:
        pivot = random.randint(left, right - 1)
        a[left], a[pivot] = (
            a[pivot],
            a[left],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right )  # recursive quicksort to the right of the pivot point
def main() -> None:
    """Read comma-separated integers from stdin, quicksort them, print the result.

    Fixes vs. original: the body referenced the undefined placeholder ``_A``
    everywhere, and stray pipe-table residue was fused onto the final line.
    """
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    quick_sort_random(unsorted, 0, len(unsorted))
    print(unsorted)


if __name__ == "__main__":
    main()
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( lowercase__ , unittest.TestCase ):
    # NOTE(review): the base name `lowercase__` is not defined in this module —
    # presumably PipelineTesterMixin (imported above); confirm before running.
    """Pipeline tests for KandinskyVaaPriorPipeline built from tiny dummy components."""
    lowercase : List[str] = KandinskyVaaPriorPipeline
    lowercase : Optional[Any] = ['prompt']
    lowercase : Dict = ['prompt', 'negative_prompt']
    lowercase : Dict = [
        'num_images_per_prompt',
        'generator',
        'num_inference_steps',
        'latents',
        'negative_prompt',
        'guidance_scale',
        'output_type',
        'return_dict',
    ]
    lowercase : int = False
    @property
    def a__ ( self :int ):
        # Text-embedder hidden size used by all dummy components below.
        return 3_2
    @property
    def a__ ( self :Any ):
        # Time-embedding input dimension.
        return 3_2
    @property
    def a__ ( self :Union[str, Any] ):
        return self.time_input_dim
    @property
    def a__ ( self :int ):
        return self.time_input_dim * 4
    @property
    def a__ ( self :Dict ):
        return 1_0_0
    @property
    def a__ ( self :List[Any] ):
        """Tiny random CLIP tokenizer shared by the dummy components."""
        snake_case_ : List[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        return tokenizer
    @property
    def a__ ( self :int ):
        """Deterministic tiny CLIP text encoder with projection head."""
        torch.manual_seed(0 )
        snake_case_ : Optional[Any] = CLIPTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=3_7 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,)
        return CLIPTextModelWithProjection(_UpperCamelCase )
    @property
    def a__ ( self :Dict ):
        """Deterministic tiny PriorTransformer with clip_std forced to 1."""
        torch.manual_seed(0 )
        snake_case_ : Tuple = {
            """num_attention_heads""": 2,
            """attention_head_dim""": 1_2,
            """embedding_dim""": self.text_embedder_hidden_size,
            """num_layers""": 1,
        }
        snake_case_ : int = PriorTransformer(**_UpperCamelCase )
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        snake_case_ : Any = nn.Parameter(torch.ones(model.clip_std.shape ) )
        return model
    @property
    def a__ ( self :Optional[int] ):
        """Deterministic tiny CLIP vision encoder with projection head."""
        torch.manual_seed(0 )
        snake_case_ : Any = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size ,image_size=2_2_4 ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=3_7 ,num_attention_heads=4 ,num_channels=3 ,num_hidden_layers=5 ,patch_size=1_4 ,)
        snake_case_ : Optional[Any] = CLIPVisionModelWithProjection(_UpperCamelCase )
        return model
    @property
    def a__ ( self :List[Any] ):
        """CLIP image processor matching the vision encoder's 224px input."""
        snake_case_ : Any = CLIPImageProcessor(
            crop_size=2_2_4 ,do_center_crop=_UpperCamelCase ,do_normalize=_UpperCamelCase ,do_resize=_UpperCamelCase ,image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] ,image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] ,resample=3 ,size=2_2_4 ,)
        return image_processor
    def a__ ( self :List[Any] ):
        """Assemble all dummy components into the pipeline kwargs dict."""
        snake_case_ : Tuple = self.dummy_prior
        snake_case_ : Any = self.dummy_image_encoder
        snake_case_ : Optional[int] = self.dummy_text_encoder
        snake_case_ : Any = self.dummy_tokenizer
        snake_case_ : Union[str, Any] = self.dummy_image_processor
        snake_case_ : Tuple = UnCLIPScheduler(
            variance_type="""fixed_small_log""" ,prediction_type="""sample""" ,num_train_timesteps=1_0_0_0 ,clip_sample=_UpperCamelCase ,clip_sample_range=10.0 ,)
        snake_case_ : Tuple = {
            """prior""": prior,
            """image_encoder""": image_encoder,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """scheduler""": scheduler,
            """image_processor""": image_processor,
        }
        return components
    def a__ ( self :Dict ,_UpperCamelCase :str ,_UpperCamelCase :Union[str, Any]=0 ):
        """Build deterministic call kwargs; mps needs a CPU-seeded generator."""
        if str(_UpperCamelCase ).startswith("""mps""" ):
            snake_case_ : Optional[int] = torch.manual_seed(_UpperCamelCase )
        else:
            snake_case_ : int = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
        snake_case_ : Dict = {
            """prompt""": """horse""",
            """generator""": generator,
            """guidance_scale""": 4.0,
            """num_inference_steps""": 2,
            """output_type""": """np""",
        }
        return inputs
    def a__ ( self :int ):
        """Run the pipeline on CPU and compare the embedding tail to a golden slice."""
        snake_case_ : Union[str, Any] = """cpu"""
        snake_case_ : int = self.get_dummy_components()
        snake_case_ : int = self.pipeline_class(**_UpperCamelCase )
        snake_case_ : Union[str, Any] = pipe.to(_UpperCamelCase )
        pipe.set_progress_bar_config(disable=_UpperCamelCase )
        snake_case_ : Any = pipe(**self.get_dummy_inputs(_UpperCamelCase ) )
        snake_case_ : List[Any] = output.image_embeds
        snake_case_ : List[Any] = pipe(
            **self.get_dummy_inputs(_UpperCamelCase ) ,return_dict=_UpperCamelCase ,)[0]
        snake_case_ : Dict = image[0, -1_0:]
        snake_case_ : Optional[Any] = image_from_tuple[0, -1_0:]
        assert image.shape == (1, 3_2)
        snake_case_ : Optional[int] = np.array(
            [-0.05_32, 1.71_20, 0.36_56, -1.08_52, -0.89_46, -1.17_56, 0.43_48, 0.24_82, 0.51_46, -0.11_56] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
    @skip_mps
    def a__ ( self :List[Any] ):
        """Batched single inference must match unbatched results (CPU only strict)."""
        snake_case_ : Any = torch_device == """cpu"""
        snake_case_ : Any = True
        snake_case_ : Dict = False
        self._test_inference_batch_single_identical(
            test_max_difference=_UpperCamelCase ,relax_max_difference=_UpperCamelCase ,test_mean_pixel_difference=_UpperCamelCase ,)
    @skip_mps
    def a__ ( self :str ):
        """Attention slicing must not change the forward-pass output."""
        snake_case_ : str = torch_device == """cpu"""
        snake_case_ : List[str] = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=_UpperCamelCase ,test_mean_pixel_difference=_UpperCamelCase ,) | 267 | 0 |
"""simple docstring"""
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
from .unet_ad import UNetaDModel
from .unet_ad import UNetaDModel
from .unet_ad_condition import UNetaDConditionModel
from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 449 |
'''simple docstring'''
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
snake_case_ : List[str] = '▁'
snake_case_ : Dict = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class lowercase__ ( snake_case_, unittest.TestCase ):
    # NOTE(review): the first base `snake_case_` is bound above to a *string* (the
    # SPIECE underline / fixtures path constants share that mangled name), so this
    # class statement cannot work as written — the original base was presumably
    # TokenizerTesterMixin. The same mangled names are used as constructor args in
    # setUp below; confirm against the upstream test before running.
    """Tests for BertGenerationTokenizer (sentencepiece-backed)."""
    _snake_case = BertGenerationTokenizer
    _snake_case = False
    _snake_case = True
    def UpperCAmelCase ( self ):
        """Build a tokenizer from the fixture sentencepiece model and save it."""
        super().setUp()
        UpperCamelCase = BertGenerationTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ )
        tokenizer.save_pretrained(self.tmpdirname )
    def UpperCAmelCase ( self ):
        """<s> must round-trip to id 1 and back."""
        UpperCamelCase = '''<s>'''
        UpperCamelCase = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) , lowerCamelCase__ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) , lowerCamelCase__ )
    def UpperCAmelCase ( self ):
        """Spot-check first/last vocabulary entries and vocab length."""
        UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''<unk>''' )
        self.assertEqual(vocab_keys[1] , '''<s>''' )
        self.assertEqual(vocab_keys[-1] , '''<pad>''' )
        self.assertEqual(len(lowerCamelCase__ ) , 1_0_0_2 )
    def UpperCAmelCase ( self ):
        """The tokenizer must report the expected vocab size."""
        self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 )
    def UpperCAmelCase ( self ):
        """Tokenize/encode/decode golden checks, including <unk> fallbacks."""
        UpperCamelCase = BertGenerationTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ )
        UpperCamelCase = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(lowerCamelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , )
        UpperCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            lowerCamelCase__ , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''9''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''é''',
                '''.''',
            ] , )
        UpperCamelCase = tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
        self.assertListEqual(
            lowerCamelCase__ , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , )
        UpperCamelCase = tokenizer.convert_ids_to_tokens(lowerCamelCase__ )
        self.assertListEqual(
            lowerCamelCase__ , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''<unk>''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''<unk>''',
                '''.''',
            ] , )
    @cached_property
    def UpperCAmelCase ( self ):
        """Pretrained production tokenizer used by the slow tests below."""
        return BertGenerationTokenizer.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
    @slow
    def UpperCAmelCase ( self ):
        """Golden encoding for a short sentence."""
        UpperCamelCase = '''Hello World!'''
        UpperCamelCase = [1_8_5_3_6, 2_2_6_0, 1_0_1]
        self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) )
    @slow
    def UpperCAmelCase ( self ):
        """Golden encoding for a long sentence with symbols and OOV words."""
        UpperCamelCase = (
            '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
            ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
        )
        UpperCamelCase = [
            8_7_1,
            4_1_9,
            3_5_8,
            9_4_6,
            9_9_1,
            2_5_2_1,
            4_5_2,
            3_5_8,
            1_3_5_7,
            3_8_7,
            7_7_5_1,
            3_5_3_6,
            1_1_2,
            9_8_5,
            4_5_6,
            1_2_6,
            8_6_5,
            9_3_8,
            5_4_0_0,
            5_7_3_4,
            4_5_8,
            1_3_6_8,
            4_6_7,
            7_8_6,
            2_4_6_2,
            5_2_4_6,
            1_1_5_9,
            6_3_3,
            8_6_5,
            4_5_1_9,
            4_5_7,
            5_8_2,
            8_5_2,
            2_5_5_7,
            4_2_7,
            9_1_6,
            5_0_8,
            4_0_5,
            3_4_3_2_4,
            4_9_7,
            3_9_1,
            4_0_8,
            1_1_3_4_2,
            1_2_4_4,
            3_8_5,
            1_0_0,
            9_3_8,
            9_8_5,
            4_5_6,
            5_7_4,
            3_6_2,
            1_2_5_9_7,
            3_2_0_0,
            3_1_2_9,
            1_1_7_2,
        ]
        self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) )
    @require_torch
    @slow
    def UpperCAmelCase ( self ):
        """The encoded batch must feed a BertGenerationEncoder forward pass."""
        import torch
        from transformers import BertGenerationConfig, BertGenerationEncoder
        # Build sequence
        UpperCamelCase = list(self.big_tokenizer.get_vocab().keys() )[:1_0]
        UpperCamelCase = ''' '''.join(lowerCamelCase__ )
        UpperCamelCase = self.big_tokenizer.encode_plus(lowerCamelCase__ , return_tensors='''pt''' , return_token_type_ids=lowerCamelCase__ )
        UpperCamelCase = self.big_tokenizer.batch_encode_plus(
            [sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=lowerCamelCase__ )
        UpperCamelCase = BertGenerationConfig()
        UpperCamelCase = BertGenerationEncoder(lowerCamelCase__ )
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**lowerCamelCase__ )
            model(**lowerCamelCase__ )
    @slow
    def UpperCAmelCase ( self ):
        """Full integration check against a pinned checkpoint revision."""
        # fmt: off
        UpperCamelCase = {'''input_ids''': [[3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4], [4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCamelCase__ , model_name='''google/bert_for_seq_generation_L-24_bbc_encoder''' , revision='''c817d1fd1be2ffa69431227a1fe320544943d4db''' , )
| 212 | 0 |
'''simple docstring'''
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
# Fix: every constant here was bound to the single mangled name `_snake_case`
# while the checker below reads PATH_TO_TRANSFORMERS, spec, transformers,
# CONFIG_MAPPING, _re_checkpoint and the ignore set — restore the real names.
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
# (raw string so the backslashes are real regex escapes, not invalid str escapes)
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

# Config classes exempt from the docstring-checkpoint requirement.
# NOTE(review): these entries end in "ConfigMixin" while transformers config
# classes are named "...Config" — verify the suffixes against the repo.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}
def check_config_docstrings_have_checkpoints():
    """Fail if any registered config class lacks a valid checkpoint link in its docstring.

    A link counts as valid when the markdown text `[name](url)` satisfies
    url == https://huggingface.co/name. Fixes vs. original: the function was
    named `lowercase_` although the `__main__` guard calls it by this name,
    and the failure list collected a mangled sentinel instead of the class name.

    Raises:
        ValueError: listing every config class without a valid checkpoint.
    """
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
| 702 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """Fetch a JSON record from Open Library for the given olid.

    Fixes vs. original: the function was named `lowercase_` although both
    `summarize_book` and the interactive loop below call `get_openlibrary_data`.

    Args:
        olid: Open Library id of the form "<kind>/<id>", e.g. "isbn/0140328726".
            Leading/trailing whitespace and slashes are tolerated.

    Returns:
        The decoded JSON response as a dict.

    Raises:
        ValueError: if `olid` does not contain exactly one "/" after trimming.
    """
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()
def summarize_book(ol_book_data: dict) -> dict:
    """Reduce a raw Open Library book record to a human-readable summary dict.

    Fixes vs. original: the function was named `lowercase_` (shadowing the
    fetcher defined under the same mangled name, and unreachable under the
    name the interactive loop calls), and the list-flattening check compared
    a value against itself instead of testing `isinstance(value, list)`.

    Args:
        ol_book_data: raw record containing at least the keys in
            `desired_keys` below; "authors" entries must carry a "key" olid
            and "first_sentence" a {"value": ...} mapping.

    Returns:
        Dict keyed by display labels; author names are resolved via
        `get_openlibrary_data` and list values are joined with ", ".
    """
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    # Resolve each author olid to a display name.
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
if __name__ == "__main__":
    # Run doctests first, then drop into an interactive ISBN-lookup loop.
    import doctest
    doctest.testmod()
    while True:
        # NOTE(review): the input is bound to a mangled name but the checks below
        # read `isbn` (and later `book_summary`) — these were the same variables
        # before mangling; the loop cannot run as written.
        _snake_case = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        # ISBNs are 10 or 13 digits long.
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
            continue
        print(f'''\nSearching Open Library for ISBN: {isbn}...\n''')
        try:
            _snake_case = summarize_book(get_openlibrary_data(f'''isbn/{isbn}'''))
            print('''\n'''.join(f'''{key}: {value}''' for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(f'''Sorry, there are no results for ISBN: {isbn}.''')
| 231 | 0 |
"""simple docstring"""
lowerCAmelCase__ = [
(1000, '''M'''),
(900, '''CM'''),
(500, '''D'''),
(400, '''CD'''),
(100, '''C'''),
(90, '''XC'''),
(50, '''L'''),
(40, '''XL'''),
(10, '''X'''),
(9, '''IX'''),
(5, '''V'''),
(4, '''IV'''),
(1, '''I'''),
]
def roman_to_int(roman: str) -> int:
    """Convert a Roman numeral string to its integer value.

    Handles the subtractive forms (IV, IX, XL, ...) by consuming two symbols
    at once when a smaller value precedes a larger one. Fixes vs. original:
    both converters in this module were defined under the single mangled name
    `snake_case_`, so this one was shadowed and unreachable.

    Args:
        roman: numeral built from I, V, X, L, C, D, M (empty string -> 0).
    """
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        # Subtractive pair: smaller symbol before a larger one.
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total
return total
def int_to_roman(number: int) -> str:
    """Convert a positive integer to its Roman numeral representation.

    Greedily consumes the value table `ROMAN` (largest first), appending each
    symbol as many times as it divides the remaining number. Fixes vs.
    original: the duplicate `snake_case_` definition shadowed the converter
    above; renamed so both functions are reachable.

    Args:
        number: positive integer (values above 3999 repeat "M").
    """
    result = []
    for arabic, roman in ROMAN:
        factor, number = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 83 |
from math import factorial
def solution(n: int = 20) -> int:
    """Count lattice paths through an n x n grid: the central binomial C(2n, n).

    Project Euler problem 15. Fixes vs. original: both intermediates were
    bound to a single mangled name, leaving `k` undefined (NameError), and
    the function was not actually named `solution` as the `__main__` guard
    below requires.

    Args:
        n: grid size; the default 20 gives 137846528820.
    """
    total = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = total // 2
    return int(factorial(total) / (factorial(k) * factorial(total - k)))
if __name__ == "__main__":
    # CLI: optional grid size as argv[1], defaulting to the PE15 value of 20.
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            # Fix: the parsed argument was bound to a mangled name while the
            # call below read `n`, so `n` was undefined.
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
| 625 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a : List[str] = {"""processing_wav2vec2_with_lm""": ["""Wav2Vec2ProcessorWithLM"""]}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
a : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 85 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
a : List[str] = logging.get_logger(__name__)
# Canonical checkpoint name -> hosted config.json URL.
a : Union[str, Any] = {
    """google/pix2struct-textcaps-base""": (
        """https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"""
    ),
}
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
    """Configuration for the Pix2Struct text (decoder) sub-model.

    NOTE(review): obfuscation binds every class attribute to the same name
    (`__lowerCamelCase`) and every parameter to `snake_case__`; the values
    below correspond to model_type, keys_to_ignore_at_inference and
    attribute_map in the original source — verify against upstream.
    """
    __lowerCamelCase = "pix2struct_text_model"
    __lowerCamelCase = ["past_key_values"]
    __lowerCamelCase = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__( self , snake_case__=50244 , snake_case__=768 , snake_case__=64 , snake_case__=2048 , snake_case__=12 , snake_case__=12 , snake_case__=32 , snake_case__=128 , snake_case__=0.1 , snake_case__=1e-6 , snake_case__=1.0 , snake_case__="gelu_new" , snake_case__=0 , snake_case__=False , snake_case__=0 , snake_case__=1 , snake_case__=False , snake_case__=True , **snake_case__ , ):
        '''Store text-decoder hyperparameters and forward token ids to the base config.'''
        lowercase__ : int= vocab_size
        lowercase__ : Optional[Any]= hidden_size
        lowercase__ : Tuple= d_kv
        lowercase__ : Optional[int]= d_ff
        lowercase__ : Any= num_layers
        lowercase__ : Dict= num_heads
        lowercase__ : List[Any]= relative_attention_num_buckets
        lowercase__ : Optional[Any]= relative_attention_max_distance
        lowercase__ : Dict= dropout_rate
        lowercase__ : Tuple= layer_norm_epsilon
        lowercase__ : str= initializer_factor
        lowercase__ : Any= use_cache
        lowercase__ : Optional[int]= eos_token_id
        lowercase__ : str= decoder_start_token_id
        # for backwards compatibility
        lowercase__ : Optional[Any]= dense_act_fn
        super().__init__(
            pad_token_id=snake_case__ , eos_token_id=snake_case__ , decoder_start_token_id=snake_case__ , tie_word_embeddings=snake_case__ , is_decoder=snake_case__ , **snake_case__ , )
    @classmethod
    def UpperCAmelCase_ ( cls , snake_case__ , **snake_case__ ):
        '''Load this text config from a pretrained name/path, unwrapping a composite pix2struct config if given.'''
        cls._set_token_in_kwargs(snake_case__ )
        lowercase__, lowercase__ : str= cls.get_config_dict(snake_case__ , **snake_case__ )
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type" ) == "pix2struct":
            lowercase__ : str= config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(snake_case__ , **snake_case__ )
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
    """Configuration for the Pix2Struct vision (encoder) sub-model.

    NOTE(review): parameter names are obfuscated to `snake_case__`; the
    assignments below show which hyperparameters are stored — verify the
    positional defaults against the upstream Pix2StructVisionConfig.
    """
    __lowerCamelCase = "pix2struct_vision_model"
    def __init__( self , snake_case__=768 , snake_case__=768 , snake_case__=2048 , snake_case__=64 , snake_case__=12 , snake_case__=12 , snake_case__="gelu_new" , snake_case__=1e-6 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=1e-10 , snake_case__=1.0 , snake_case__=4096 , snake_case__=32 , snake_case__=128 , **snake_case__ , ):
        '''Store vision-encoder hyperparameters on the config instance.'''
        super().__init__(**snake_case__ )
        lowercase__ : Tuple= hidden_size
        lowercase__ : Tuple= patch_embed_hidden_size
        lowercase__ : Optional[Any]= d_ff
        lowercase__ : Dict= dropout_rate
        lowercase__ : Any= num_hidden_layers
        lowercase__ : Optional[int]= num_attention_heads
        lowercase__ : Dict= initializer_range
        lowercase__ : Tuple= initializer_factor
        lowercase__ : Tuple= attention_dropout
        lowercase__ : Optional[Any]= layer_norm_eps
        lowercase__ : List[Any]= dense_act_fn
        lowercase__ : str= seq_len
        lowercase__ : List[str]= relative_attention_num_buckets
        lowercase__ : Union[str, Any]= relative_attention_max_distance
        lowercase__ : Dict= d_kv
    @classmethod
    def UpperCAmelCase_ ( cls , snake_case__ , **snake_case__ ):
        '''Load this vision config from a pretrained name/path, unwrapping a composite pix2struct config if given.'''
        cls._set_token_in_kwargs(snake_case__ )
        lowercase__, lowercase__ : int= cls.get_config_dict(snake_case__ , **snake_case__ )
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type" ) == "pix2struct":
            lowercase__ : Union[str, Any]= config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(snake_case__ , **snake_case__ )
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
    """Composite Pix2Struct configuration combining a text and a vision sub-config.

    NOTE(review): `PixaStructTextConfig` / `PixaStructVisionConfig` are not
    defined in this view (the sub-config classes above share an obfuscated
    name), so this constructor would raise NameError as written — verify
    against the original source.
    """
    __lowerCamelCase = "pix2struct"
    __lowerCamelCase = True
    def __init__( self , snake_case__=None , snake_case__=None , snake_case__=1.0 , snake_case__=0.02 , snake_case__=False , snake_case__=False , snake_case__=True , **snake_case__ , ):
        '''Build sub-configs (with defaults when None) and mirror their special token ids.'''
        super().__init__(tie_word_embeddings=snake_case__ , is_encoder_decoder=snake_case__ , **snake_case__ )
        if text_config is None:
            lowercase__ : List[Any]= {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values." )
        if vision_config is None:
            lowercase__ : str= {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values." )
        lowercase__ : str= PixaStructTextConfig(**snake_case__ )
        lowercase__ : Dict= PixaStructVisionConfig(**snake_case__ )
        # Mirror the text decoder's special token ids at the top level.
        lowercase__ : int= self.text_config.decoder_start_token_id
        lowercase__ : List[Any]= self.text_config.pad_token_id
        lowercase__ : Any= self.text_config.eos_token_id
        lowercase__ : Any= initializer_factor
        lowercase__ : int= initializer_range
        lowercase__ : List[str]= self.initializer_range
        lowercase__ : List[str]= self.initializer_range
        lowercase__ : Dict= is_vqa
    @classmethod
    def UpperCAmelCase_ ( cls , snake_case__ , snake_case__ , **snake_case__ ):
        '''Alternate constructor: build a composite config from existing sub-config instances.'''
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case__ )
    def UpperCAmelCase_ ( self ):
        '''Serialize to a plain dict, expanding both sub-configs and recording model_type.'''
        lowercase__ : Union[str, Any]= copy.deepcopy(self.__dict__ )
        lowercase__ : str= self.text_config.to_dict()
        lowercase__ : str= self.vision_config.to_dict()
        lowercase__ : List[str]= self.__class__.model_type
        return output
| 85 | 1 |
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class _UpperCAmelCase( nn.Module ):
    """Dual-transformer block: two TransformeraDModel instances whose outputs
    are mixed by `mix_ratio`, each attending over a different slice of the
    encoder hidden states (split by `condition_lengths`)."""
    def __init__( self , __a = 16 , __a = 88 , __a = None , __a = 1 , __a = 0.0 , __a = 32 , __a = None , __a = False , __a = None , __a = None , __a = "geglu" , __a = None , ) -> str:
        '''Build the two identically-configured transformers and the mixing defaults.'''
        super().__init__()
        _UpperCamelCase = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=__a , attention_head_dim=__a , in_channels=__a , num_layers=__a , dropout=__a , norm_num_groups=__a , cross_attention_dim=__a , attention_bias=__a , sample_size=__a , num_vector_embeds=__a , activation_fn=__a , num_embeds_ada_norm=__a , )
                for _ in range(2)
            ])
        # Variables that can be set by a pipeline:
        # The ratio of transformer1 to transformer2's output states to be combined during inference
        _UpperCamelCase = 0.5
        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        _UpperCamelCase = [77, 2_57]
        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        _UpperCamelCase = [1, 0]
    def UpperCAmelCase ( self , __a , __a , __a=None , __a=None , __a=None , __a = True , ) -> Dict:
        '''Run both transformers on their condition slice, blend residuals by mix_ratio, and re-add the input.'''
        _UpperCamelCase = hidden_states
        _UpperCamelCase = []
        _UpperCamelCase = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            _UpperCamelCase = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            _UpperCamelCase = self.transformer_index_for_condition[i]
            _UpperCamelCase = self.transformers[transformer_index](
                __a , encoder_hidden_states=__a , timestep=__a , cross_attention_kwargs=__a , return_dict=__a , )[0]
            # Store only the residual so the input can be re-added once after mixing.
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]
        _UpperCamelCase = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        _UpperCamelCase = output_states + input_states
        if not return_dict:
            return (output_states,)
        return TransformeraDModelOutput(sample=__a)
| 19 | from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class SCREAMING_SNAKE_CASE_ :
    '''Helper that builds small EsmConfig fixtures and runs per-model checks for the TF ESM tests.

    NOTE(review): obfuscation assigns many distinct locals to the single name
    `A`; the later code references the original names (config, input_ids, ...)
    — verify against the upstream tester.
    '''
    def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> Optional[int]:
        # Fixed, tiny hyperparameters so the test models build fast.
        A : Optional[int] =parent
        A : Dict =13
        A : List[str] =7
        A : Any =True
        A : str =True
        A : Optional[int] =True
        A : Union[str, Any] =99
        A : List[Any] =32
        A : Optional[Any] =2
        A : int =4
        A : List[Any] =37
        A : Any ='gelu'
        A : Optional[Any] =0.1
        A : Optional[Any] =0.1
        A : List[Any] =5_12
        A : Optional[Any] =16
        A : Optional[Any] =2
        A : Dict =0.0_2
        A : Dict =3
        A : Union[str, Any] =4
        A : int =None
    def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Any:
        # Build random ids/masks/labels plus a matching EsmConfig.
        A : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        A : List[Any] =None
        if self.use_input_mask:
            A : List[str] =random_attention_mask([self.batch_size, self.seq_length] )
        A : List[str] =None
        A : Tuple =None
        A : List[str] =None
        if self.use_labels:
            A : Union[str, Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
            A : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            A : List[str] =ids_tensor([self.batch_size] , self.num_choices )
        A : str =EsmConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def SCREAMING_SNAKE_CASE_ ( self : int ) -> Optional[int]:
        # Same fixtures plus encoder states/mask for decoder (cross-attention) tests.
        (
            (
                A
            ) , (
                A
            ) , (
                A
            ) , (
                A
            ) , (
                A
            ) , (
                A
            ) ,
        ) : Any =self.prepare_config_and_inputs()
        A : Dict =True
        A : Optional[Any] =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        A : Any =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def SCREAMING_SNAKE_CASE_ ( self : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Tuple:
        # Base model accepts dict, list, and positional call forms; check output shape.
        A : List[Any] =TFEsmModel(config=SCREAMING_SNAKE_CASE__ )
        A : str ={'input_ids': input_ids, 'attention_mask': input_mask}
        A : str =model(SCREAMING_SNAKE_CASE__ )
        A : Any =[input_ids, input_mask]
        A : Optional[Any] =model(SCREAMING_SNAKE_CASE__ )
        A : Optional[Any] =model(SCREAMING_SNAKE_CASE__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def SCREAMING_SNAKE_CASE_ ( self : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , ) -> Optional[int]:
        # Decoder mode: pass encoder states via dict, kwargs, and omit them entirely.
        A : List[Any] =True
        A : List[Any] =TFEsmModel(config=SCREAMING_SNAKE_CASE__ )
        A : Dict ={
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'encoder_hidden_states': encoder_hidden_states,
            'encoder_attention_mask': encoder_attention_mask,
        }
        A : Union[str, Any] =model(SCREAMING_SNAKE_CASE__ )
        A : Optional[int] =[input_ids, input_mask]
        A : Dict =model(SCREAMING_SNAKE_CASE__ , encoder_hidden_states=SCREAMING_SNAKE_CASE__ )
        # Also check the case where encoder outputs are not passed
        A : Optional[Any] =model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict ) -> Union[str, Any]:
        # Masked-LM head: logits shape is (batch, seq, vocab).
        A : Any =TFEsmForMaskedLM(config=SCREAMING_SNAKE_CASE__ )
        A : Union[str, Any] =model([input_ids, input_mask] )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[Any]:
        # Token-classification head: logits shape is (batch, seq, num_labels).
        A : Optional[int] =self.num_labels
        A : List[str] =TFEsmForTokenClassification(config=SCREAMING_SNAKE_CASE__ )
        A : Optional[int] ={'input_ids': input_ids, 'attention_mask': input_mask}
        A : Optional[int] =model(SCREAMING_SNAKE_CASE__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Tuple:
        # Reduce the full fixture tuple to the (config, inputs_dict) pair common tests expect.
        A : Optional[Any] =self.prepare_config_and_inputs()
        (
            (
                A
            ) , (
                A
            ) , (
                A
            ) , (
                A
            ) , (
                A
            ) , (
                A
            ) ,
        ) : List[str] =config_and_inputs
        A : str ={'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
    '''Common-suite test case for the TF ESM model family (model, MLM, sequence/token classification).'''
    lowercase : Dict = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    lowercase : Tuple = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    lowercase : str = False
    lowercase : Optional[Any] = False
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Optional[Any]:
        # Build the model tester and a config tester with a small hidden size.
        A : Any =TFEsmModelTester(self )
        A : Any =ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> int:
        self.config_tester.run_common_tests()
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Optional[int]:
        A : str =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
    def SCREAMING_SNAKE_CASE_ ( self : str ) -> Optional[Any]:
        A : Optional[int] =self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*SCREAMING_SNAKE_CASE__ )
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> List[str]:
        A : Optional[Any] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE__ )
    def SCREAMING_SNAKE_CASE_ ( self : Any ) -> int:
        A : Dict =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE__ )
    @slow
    def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Any:
        # Smoke-test loading the first published checkpoint.
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A : Optional[Any] =TFEsmModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
            self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
    @unittest.skip('Protein models do not support embedding resizing.' )
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Union[str, Any]:
        pass
    @unittest.skip('Protein models do not support embedding resizing.' )
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Tuple:
        pass
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Optional[int]:
        # ESM ties no output embeddings; only the MLM bias dict should exist.
        A , A : str =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            A : Union[str, Any] =model_class(SCREAMING_SNAKE_CASE__ )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                A : Any =model.get_bias()
                assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
                for k, v in name.items():
                    assert isinstance(SCREAMING_SNAKE_CASE__ , tf.Variable )
            else:
                A : List[Any] =model.get_output_embeddings()
                assert x is None
                A : Optional[Any] =model.get_bias()
                assert name is None
@require_tf
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    '''Slow integration tests pinning TF ESM outputs against hard-coded reference slices.'''
    @slow
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Optional[int]:
        # MLM head output shape and a 3x3 logits slice from the 8M checkpoint.
        A : Optional[Any] =TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
        A : Any =tf.constant([[0, 1, 2, 3, 4, 5]] )
        A : Dict =model(SCREAMING_SNAKE_CASE__ )[0]
        A : str =[1, 6, 33]
        self.assertEqual(list(output.numpy().shape ) , SCREAMING_SNAKE_CASE__ )
        # compare the actual values for a slice.
        A : Dict =tf.constant(
            [
                [
                    [8.9_2_1_5_1_8, -1_0.5_8_9_8_1_4, -6.4_6_7_1_3_0_7],
                    [-6.3_9_6_7_1_5_6, -1_3.9_1_1_3_7_7, -1.1_2_1_1_9_1_5],
                    [-7.7_8_1_2_4_7, -1_3.9_5_1_5_5_7, -3.7_4_0_5_9_2],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )
    @slow
    def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Optional[Any]:
        # Base-model hidden-state slice check on a short protein sequence.
        A : Optional[int] =TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
        A : Union[str, Any] =tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
        A : str =model(SCREAMING_SNAKE_CASE__ )[0]
        # compare the actual values for a slice.
        A : str =tf.constant(
            [
                [
                    [0.1_4_4_4_3_0_9_2, 0.5_4_1_2_5_3_2_7, 0.3_2_4_7_7_3_9],
                    [0.3_0_3_4_0_4_8_4, 0.0_0_5_2_6_6_7_6, 0.3_1_0_7_7_7_2_2],
                    [0.3_2_2_7_8_0_4_3, -0.2_4_9_8_7_0_9_6, 0.3_4_1_4_6_2_8],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 305 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
# Make framework ops deterministic so image comparisons are reproducible.
enable_full_determinism()
class A__ ( UpperCAmelCase__ , unittest.TestCase ):
    # Fast CPU test for the Kandinsky 2.2 image-to-image pipeline: builds a
    # tiny UNet + VQ movq + DDIM scheduler and checks a 3x3 output slice.
    __UpperCamelCase : Union[str, Any] = KandinskyVaaImgaImgPipeline
    __UpperCamelCase : Optional[int] = ["image_embeds", "negative_image_embeds", "image"]
    __UpperCamelCase : Dict = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    __UpperCamelCase : List[Any] = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    __UpperCamelCase : int = False
    @property
    def __UpperCAmelCase ( self :Optional[int] ) -> str:
        '''Text-embedder hidden size used by the dummy components.'''
        return 3_2
    @property
    def __UpperCAmelCase ( self :int ) -> Union[str, Any]:
        '''Time-embedding input dimension.'''
        return 3_2
    @property
    def __UpperCAmelCase ( self :List[Any] ) -> List[str]:
        '''Base block output channels (same as the time input dim).'''
        return self.time_input_dim
    @property
    def __UpperCAmelCase ( self :Tuple ) -> List[str]:
        '''Time-embedding projection dimension (4x the input dim).'''
        return self.time_input_dim * 4
    @property
    def __UpperCAmelCase ( self :Union[str, Any] ) -> str:
        '''Cross-attention dimension for the dummy UNet.'''
        return 1_0_0
    @property
    def __UpperCAmelCase ( self :List[str] ) -> Optional[int]:
        '''Deterministically build a tiny UNet2DConditionModel.'''
        torch.manual_seed(0 )
        _a : Any ={
            """in_channels""": 4,
            # Out channels is double in channels because predicts mean and variance
            """out_channels""": 8,
            """addition_embed_type""": """image""",
            """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
            """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
            """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
            """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
            """layers_per_block""": 1,
            """encoder_hid_dim""": self.text_embedder_hidden_size,
            """encoder_hid_dim_type""": """image_proj""",
            """cross_attention_dim""": self.cross_attention_dim,
            """attention_head_dim""": 4,
            """resnet_time_scale_shift""": """scale_shift""",
            """class_embed_type""": None,
        }
        _a : Dict =UNetaDConditionModel(**SCREAMING_SNAKE_CASE )
        return model
    @property
    def __UpperCAmelCase ( self :Any ) -> int:
        '''Constructor kwargs for the dummy VQ (movq) autoencoder.'''
        return {
            "block_out_channels": [3_2, 6_4],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 1_2,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }
    @property
    def __UpperCAmelCase ( self :Union[str, Any] ) -> Dict:
        '''Deterministically build the dummy VQModel.'''
        torch.manual_seed(0 )
        _a : Tuple =VQModel(**self.dummy_movq_kwargs )
        return model
    def __UpperCAmelCase ( self :str ) -> Optional[Any]:
        '''Assemble the pipeline components (UNet, DDIM scheduler, movq).'''
        _a : str =self.dummy_unet
        _a : Any =self.dummy_movq
        _a : Dict ={
            """num_train_timesteps""": 1_0_0_0,
            """beta_schedule""": """linear""",
            """beta_start""": 0.00_085,
            """beta_end""": 0.012,
            """clip_sample""": False,
            """set_alpha_to_one""": False,
            """steps_offset""": 0,
            """prediction_type""": """epsilon""",
            """thresholding""": False,
        }
        _a : int =DDIMScheduler(**SCREAMING_SNAKE_CASE )
        _a : Any ={
            """unet""": unet,
            """scheduler""": scheduler,
            """movq""": movq,
        }
        return components
    def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Dict=0 ) -> Union[str, Any]:
        '''Build seeded pipeline inputs: image embeds, a 256x256 PIL init image, and call kwargs.'''
        _a : Optional[int] =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(SCREAMING_SNAKE_CASE ) ).to(SCREAMING_SNAKE_CASE )
        _a : List[str] =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            SCREAMING_SNAKE_CASE )
        # create init_image
        _a : Union[str, Any] =floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(SCREAMING_SNAKE_CASE ) ).to(SCREAMING_SNAKE_CASE )
        _a : Union[str, Any] =image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _a : str =Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE ) ).convert("""RGB""" ).resize((2_5_6, 2_5_6) )
        # mps has no device-local generator; seed a CPU one instead.
        if str(SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
            _a : List[str] =torch.manual_seed(SCREAMING_SNAKE_CASE )
        else:
            _a : Optional[int] =torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(SCREAMING_SNAKE_CASE )
        _a : Union[str, Any] ={
            """image""": init_image,
            """image_embeds""": image_embeds,
            """negative_image_embeds""": negative_image_embeds,
            """generator""": generator,
            """height""": 6_4,
            """width""": 6_4,
            """num_inference_steps""": 1_0,
            """guidance_scale""": 7.0,
            """strength""": 0.2,
            """output_type""": """np""",
        }
        return inputs
    def __UpperCAmelCase ( self :List[str] ) -> str:
        '''End-to-end fast check: run the pipeline on CPU and pin a 3x3 pixel slice.'''
        _a : int ="""cpu"""
        _a : Any =self.get_dummy_components()
        _a : int =self.pipeline_class(**SCREAMING_SNAKE_CASE )
        _a : Optional[Any] =pipe.to(SCREAMING_SNAKE_CASE )
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
        _a : Any =pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE ) )
        _a : Union[str, Any] =output.images
        # Also exercise the tuple (return_dict=False) code path.
        _a : Any =pipe(
            **self.get_dummy_inputs(SCREAMING_SNAKE_CASE ) , return_dict=SCREAMING_SNAKE_CASE , )[0]
        _a : int =image[0, -3:, -3:, -1]
        _a : Optional[Any] =image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 6_4, 6_4, 3)
        _a : Dict =np.array(
            [0.6_199_778, 0.63_984_406, 0.46_145_785, 0.62_944_984, 0.5_622_215, 0.47_306_132, 0.47_441_456, 0.4_607_606, 0.48_719_263] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
    # GPU integration test: prior + decoder pipelines against a reference image.
    def __UpperCAmelCase ( self :Tuple ) -> Tuple:
        '''Free GPU memory between tests.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def __UpperCAmelCase ( self :List[Any] ) -> Optional[Any]:
        '''Run prior -> img2img decoder end to end and compare mean pixel difference to a stored result.'''
        _a : Dict =load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/kandinskyv22_img2img_frog.npy""" )
        _a : List[str] =load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
        _a : List[Any] ="""A red cartoon frog, 4k"""
        _a : Any =KandinskyVaaPriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
        pipe_prior.to(SCREAMING_SNAKE_CASE )
        _a : Any =KandinskyVaaImgaImgPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa )
        _a : str =pipeline.to(SCREAMING_SNAKE_CASE )
        pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
        _a : Any =torch.Generator(device="""cpu""" ).manual_seed(0 )
        # Prior produces (image_embeds, negative_image_embeds) for the decoder.
        _a : Any =pipe_prior(
            SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
        _a : Optional[Any] =pipeline(
            image=SCREAMING_SNAKE_CASE , image_embeds=SCREAMING_SNAKE_CASE , negative_image_embeds=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , strength=0.2 , output_type="""np""" , )
        _a : List[Any] =output.images[0]
        assert image.shape == (7_6_8, 7_6_8, 3)
        assert_mean_pixel_difference(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
| 718 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
# Module-level logger for this image processor file.
A__: Dict = logging.get_logger(__name__)
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : Tuple = ["pixel_values"]
    def __init__( self :Optional[Any] , SCREAMING_SNAKE_CASE :bool = True , SCREAMING_SNAKE_CASE :Optional[Dict[str, int]] = None , SCREAMING_SNAKE_CASE :PILImageResampling = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE :bool = True , SCREAMING_SNAKE_CASE :Dict[str, int] = None , SCREAMING_SNAKE_CASE :bool = True , SCREAMING_SNAKE_CASE :Union[int, float] = 1 / 2_5_5 , SCREAMING_SNAKE_CASE :bool = True , SCREAMING_SNAKE_CASE :Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE :Optional[Union[float, List[float]]] = None , **SCREAMING_SNAKE_CASE :Optional[Any] , ) -> None:
        '''Store resize/crop/rescale/normalize settings with ImageNet defaults.

        NOTE(review): obfuscation collapses all parameters to one name; the
        assignments below show the intended per-parameter mapping (do_resize,
        size, resample, do_center_crop, crop_size, do_rescale, rescale_factor,
        do_normalize, image_mean, image_std) — verify against upstream.
        '''
        super().__init__(**SCREAMING_SNAKE_CASE )
        # Default: resize shortest edge to 256, then center-crop to 224x224.
        _a : int =size if size is not None else {"""shortest_edge""": 2_5_6}
        _a : int =get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
        _a : Union[str, Any] =crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
        _a : Tuple =get_size_dict(SCREAMING_SNAKE_CASE , param_name="""crop_size""" )
        _a : Tuple =do_resize
        _a : Optional[Any] =size
        _a : Any =resample
        _a : Any =do_center_crop
        _a : Optional[int] =crop_size
        _a : int =do_rescale
        _a : Union[str, Any] =rescale_factor
        _a : List[Any] =do_normalize
        _a : Optional[int] =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        _a : int =image_std if image_std is not None else IMAGENET_STANDARD_STD
    def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :np.ndarray , SCREAMING_SNAKE_CASE :Dict[str, int] , SCREAMING_SNAKE_CASE :PILImageResampling = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE :Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE :int , ) -> np.ndarray:
        '''Resize an image so its shortest edge matches size["shortest_edge"], keeping aspect ratio.'''
        _a : List[str] =get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
        _a : Union[str, Any] =get_resize_output_image_size(SCREAMING_SNAKE_CASE , size=size["""shortest_edge"""] , default_to_square=SCREAMING_SNAKE_CASE )
        return resize(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
    def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :np.ndarray , SCREAMING_SNAKE_CASE :Dict[str, int] , SCREAMING_SNAKE_CASE :Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE :int , ) -> np.ndarray:
        '''Center-crop an image to size["height"] x size["width"].'''
        _a : str =get_size_dict(SCREAMING_SNAKE_CASE )
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}" )
        return center_crop(SCREAMING_SNAKE_CASE , size=(size["""height"""], size["""width"""]) , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
    def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :np.ndarray , SCREAMING_SNAKE_CASE :float , SCREAMING_SNAKE_CASE :Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE :List[Any] ) -> np.ndarray:
        '''Multiply pixel values by the given scale factor (e.g. 1/255).'''
        return rescale(SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
    def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :np.ndarray , SCREAMING_SNAKE_CASE :Union[float, List[float]] , SCREAMING_SNAKE_CASE :Union[float, List[float]] , SCREAMING_SNAKE_CASE :Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE :Dict , ) -> np.ndarray:
        '''Normalize an image with the given per-channel mean and std.'''
        return normalize(SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def preprocess(
    self,
    images: ImageInput,
    do_resize: Optional[bool] = None,
    size: Dict[str, int] = None,
    resample: PILImageResampling = None,
    do_center_crop: bool = None,
    crop_size: Dict[str, int] = None,
    do_rescale: Optional[bool] = None,
    rescale_factor: Optional[float] = None,
    do_normalize: Optional[bool] = None,
    image_mean: Optional[Union[float, List[float]]] = None,
    image_std: Optional[Union[float, List[float]]] = None,
    return_tensors: Optional[Union[str, TensorType]] = None,
    data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
    **kwargs,
):
    """
    Prepare one image or a batch of images for the model: optional resize, center-crop,
    rescale and normalize, then convert to `data_format` and wrap in a `BatchFeature`.

    Every ``do_*``/value argument falls back to the corresponding attribute configured on
    the processor when ``None``.

    Raises:
        ValueError: on invalid image types or when an enabled step lacks its parameters.
    """
    do_resize = do_resize if do_resize is not None else self.do_resize
    size = size if size is not None else self.size
    size = get_size_dict(size, default_to_square=False)
    resample = resample if resample is not None else self.resample
    do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
    crop_size = crop_size if crop_size is not None else self.crop_size
    crop_size = get_size_dict(crop_size, param_name="crop_size")
    do_rescale = do_rescale if do_rescale is not None else self.do_rescale
    rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
    do_normalize = do_normalize if do_normalize is not None else self.do_normalize
    image_mean = image_mean if image_mean is not None else self.image_mean
    image_std = image_std if image_std is not None else self.image_std

    images = make_list_of_images(images)

    if not valid_images(images):
        raise ValueError(
            "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
            "torch.Tensor, tf.Tensor or jax.ndarray."
        )
    if do_resize and size is None:
        raise ValueError("Size must be specified if do_resize is True.")
    if do_center_crop and crop_size is None:
        raise ValueError("Crop size must be specified if do_center_crop is True.")
    if do_rescale and rescale_factor is None:
        raise ValueError("Rescale factor must be specified if do_rescale is True.")
    if do_normalize and (image_mean is None or image_std is None):
        raise ValueError("Image mean and std must be specified if do_normalize is True.")

    # All transformations expect numpy arrays.
    images = [to_numpy_array(image) for image in images]

    if do_resize:
        images = [self.resize(image=image, size=size, resample=resample) for image in images]
    if do_center_crop:
        images = [self.center_crop(image=image, size=crop_size) for image in images]
    if do_rescale:
        images = [self.rescale(image=image, scale=rescale_factor) for image in images]
    if do_normalize:
        images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

    images = [to_channel_dimension_format(image, data_format) for image in images]
    data = {"pixel_values": images}
    return BatchFeature(data=data, tensor_type=return_tensors)
def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple]] = None):
    """
    Convert raw model outputs into per-image semantic segmentation maps.

    Args:
        outputs: Model output object exposing ``logits`` of shape (batch, num_labels, h, w).
        target_sizes: Optional list of (height, width) to resize each map to; when given,
            its length must match the batch dimension of the logits.

    Returns:
        List of 2D label maps (one tensor per image).

    Raises:
        ValueError: if ``target_sizes`` does not match the batch dimension.
    """
    logits = outputs.logits

    # Resize logits and compute semantic segmentation maps
    if target_sizes is not None:
        if len(logits) != len(target_sizes):
            raise ValueError(
                "Make sure that you pass in as many target sizes as the batch dimension of the logits"
            )

        if is_torch_tensor(target_sizes):
            target_sizes = target_sizes.numpy()

        semantic_segmentation = []
        for idx in range(len(logits)):
            # Interpolate each image's logits to its requested output size before argmax.
            resized_logits = torch.nn.functional.interpolate(
                logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
            )
            semantic_map = resized_logits[0].argmax(dim=0)
            semantic_segmentation.append(semantic_map)
    else:
        semantic_segmentation = logits.argmax(dim=1)
        semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

    return semantic_segmentation
| 506 | 0 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class lowerCamelCase_ :
    """
    Configuration container whose field names were garbled by automated re-styling.

    NOTE(review): every field below is bound to the same garbled name ``a__`` so only the
    last assignment survives, and without annotations ``@dataclass`` registers no fields.
    The original field names cannot be recovered from this file alone — restore them from
    the upstream source before relying on this class.
    """

    a__ = None
    a__ = False
    a__ = False
    a__ = False
    a__ = None
    a__ = None
    a__ = False
    a__ = False
    a__ = False
    a__ = True
    a__ = None
    a__ = 1
    a__ = None
    a__ = False
    a__ = None
    a__ = None

    def A(self):
        """Return a deep copy of this config as a new instance built from deep-copied attributes."""
        # Fix: the comprehension must deep-copy `v`; the previous code referenced an
        # undefined name and raised NameError for any instance with attributes.
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
| 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Public import structure consumed by `_LazyModule`: submodule name -> exported symbols.
# Restored name: the `_LazyModule(...)` call at the bottom reads `_import_structure`.
_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling symbols are only exported when torch is installed.
    _import_structure["modeling_canine"] = [
        "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CanineForMultipleChoice",
        "CanineForQuestionAnswering",
        "CanineForSequenceClassification",
        "CanineForTokenClassification",
        "CanineLayer",
        "CanineModel",
        "CaninePreTrainedModel",
        "load_tf_weights_in_canine",
    ]

if TYPE_CHECKING:
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 0 | 1 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
# Mapping from original CLAP checkpoint key fragments to their Transformers equivalents.
# Restored name: `rename_state_dict` below iterates `KEYS_TO_MODIFY_MAPPING.items()`.
KEYS_TO_MODIFY_MAPPING = {
    "text_branch": "text_model",
    "audio_branch": "audio_model.audio_encoder",
    "attn": "attention.self",
    "self.proj": "output.dense",
    "attention.self_mask": "attn_mask",
    "mlp.fc1": "intermediate.dense",
    "mlp.fc2": "output.dense",
    "norm1": "layernorm_before",
    "norm2": "layernorm_after",
    "bn0": "batch_norm",
}

# NOTE(review): instantiated at import time, which downloads the feature-extractor config;
# consider moving this inside the conversion entry point if import-time network access is
# undesirable. Second name restored so it no longer clobbers the mapping above.
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    """
    Build the original CLAP model for `checkpoint_path`.

    Returns:
        ``(model, model_cfg)`` as produced by ``CLAP.create_model``.
    """
    # Renamed to match the call in `convert_clap_checkpoint`; the result is a tuple,
    # so it must be unpacked (the garbled single assignment left `model`/`model_cfg` unbound).
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    """
    Translate original CLAP checkpoint keys (and split fused qkv weights) into the
    Transformers `ClapModel` layout.

    Args:
        state_dict: Original checkpoint mapping of parameter name -> tensor.

    Returns:
        A new dict with renamed keys; audio qkv tensors are split into query/key/value.
    """
    model_state_dict = {}

    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        # Fix: the original condition `"audio" and "qkv" in key` only tested `"qkv" in key`
        # because the non-empty literal "audio" is always truthy.
        if "audio" in key and "qkv" in key:
            # split qkv into query, key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    """
    Load the original CLAP checkpoint, rename its weights, and save a Transformers
    `ClapModel` plus its config to `pytorch_dump_folder_path`.

    NOTE(review): `config_path` is accepted for CLI compatibility but a default
    `ClapConfig` is used, matching the garbled original which never read it.
    """
    clap_model, _ = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    # NOTE(review): assumed target of the garbled bare assignment — the fusion flag
    # belongs on the audio sub-config; confirm against the upstream conversion script.
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Restored names: the add_argument calls use `parser` and the final call reads `args.*`,
    # but the garbled code bound both to throwaway identifiers, raising NameError.
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()
    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 717 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class __A ( a ):
@staticmethod
@abstractmethod
def _snake_case ( UpperCAmelCase_ ):
raise NotImplementedError()
@abstractmethod
def _snake_case ( self ):
raise NotImplementedError()
| 269 | 0 |
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """
    Return True if `number` is prime, using 6k±1 trial division.

    Restored names: the body already used `number` and the callers below use `is_prime`,
    neither of which the garbled signature bound.
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
# Odd composite numbers below 100001 — the candidate pool for Goldbach's other conjecture.
# Restored name: `compute_nums` below indexes `odd_composites`.
odd_composites = [num for num in range(3, 1_0_0_0_0_1, 2) if not is_prime(num)]
def compute_nums(n: int) -> list[int]:
    """
    Return the first `n` odd composites that cannot be written as prime + 2*i^2
    (Project Euler 46). Restored names: the body already read `n`, `list_nums`, `i`,
    `odd_composites` and `compute_nums` is called by `solution` below.

    Raises:
        ValueError: if `n` is not an integer or is not positive.
    """
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                # A valid prime + 2*i^2 decomposition exists, so skip this composite.
                break
            i += 1
        else:
            # No decomposition found: this composite violates the conjecture.
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []
def solution() -> int:
    """Return the smallest odd composite not expressible as prime + 2*square (Euler 46)."""
    # Restored name: the __main__ guard below calls `solution()`.
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(f"{solution() = }")
| 587 |
'''simple docstring'''
def bead_sort(sequence: list) -> list:
    """
    Sort `sequence` of non-negative integers in ascending order, in place, using bead sort,
    and return the same list. Restored names: the body already read `sequence` and the
    __main__ guard calls `bead_sort`.

    Raises:
        TypeError: if any element is not a non-negative integer.
    """
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError('Sequence must be list of non-negative integers')
    # Each pass lets "beads" fall one level; len(sequence) passes reach a fixed point.
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
    # Smoke-check the sort when this module is executed directly.
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 284 | 0 |
'''simple docstring'''
import os
from pathlib import Path
def __UpperCamelCase():
    """Compile and import the custom multi-scale deformable-attention C++/CUDA kernels."""
    from torch.utils.cpp_extension import load

    # Kernel sources live under <repo-root>/kernels/deformable_detr relative to this file.
    # Restored names: the list comprehension below already read `root`, and the garbled
    # code passed an undefined name where the current file path belongs.
    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,  # NOTE(review): garbled original passed an undefined name; True matches the CUDA flags below
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
| 428 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Public import structure consumed by `_LazyModule`: submodule name -> exported symbols.
# Restored name: the `_LazyModule(...)` call at the bottom reads `_import_structure`;
# the garbled code also reused one variable for every list, clobbering the dict.
_import_structure = {
    "configuration_mobilebert": [
        "MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileBertConfig",
        "MobileBertOnnxConfig",
    ],
    "tokenization_mobilebert": ["MobileBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Fast tokenizer is only exported when `tokenizers` is installed.
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilebert"] = [
        "MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileBertForMaskedLM",
        "MobileBertForMultipleChoice",
        "MobileBertForNextSentencePrediction",
        "MobileBertForPreTraining",
        "MobileBertForQuestionAnswering",
        "MobileBertForSequenceClassification",
        "MobileBertForTokenClassification",
        "MobileBertLayer",
        "MobileBertModel",
        "MobileBertPreTrainedModel",
        "load_tf_weights_in_mobilebert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
        "TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileBertForMaskedLM",
        "TFMobileBertForMultipleChoice",
        "TFMobileBertForNextSentencePrediction",
        "TFMobileBertForPreTraining",
        "TFMobileBertForQuestionAnswering",
        "TFMobileBertForSequenceClassification",
        "TFMobileBertForTokenClassification",
        "TFMobileBertMainLayer",
        "TFMobileBertModel",
        "TFMobileBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mobilebert import (
        MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileBertConfig,
        MobileBertOnnxConfig,
    )
    from .tokenization_mobilebert import MobileBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mobilebert_fast import MobileBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilebert import (
            MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
            MobileBertLayer,
            MobileBertModel,
            MobileBertPreTrainedModel,
            load_tf_weights_in_mobilebert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilebert import (
            TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileBertForMaskedLM,
            TFMobileBertForMultipleChoice,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertMainLayer,
            TFMobileBertModel,
            TFMobileBertPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 428 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Public import structure consumed by `_LazyModule`: submodule name -> exported symbols.
# Restored name: the `_LazyModule(...)` call at the bottom reads `_import_structure`.
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling symbols are only exported when torch is installed.
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 347 |
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
lowerCAmelCase__ : int = logging.get_logger(__name__)
def normalize_box(box, width, height):
    """
    Normalize an absolute ``(left, top, right, bottom)`` pixel box to the 0-1000 scale
    used by LayoutLM-style models.

    Restored names: the body already read `box`, `width` and `height`, and the OCR helper
    below calls `normalize_box`.
    """
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    """
    Run Tesseract OCR on a document image and return recognized words plus their
    0-1000-normalized bounding boxes.

    Restored name: the image processor's `preprocess` calls `apply_tesseract`; the
    garbled definition shadowed the `normalize_box` helper above under the same name.
    """
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type='dict', config=tesseract_config)
    words, left, top, width, height = data['text'], data['left'], data['top'], data['width'], data['height']

    # filter empty words and corresponding coordinates
    irrelevant_indices = {idx for idx, word in enumerate(words) if not word.strip()}  # set: O(1) lookups below
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class a ( SCREAMING_SNAKE_CASE ):
    r"""
    Image processor for document-understanding models: optionally resizes, rescales and
    normalizes images, and optionally runs Tesseract OCR to extract words plus
    0-1000-normalized bounding boxes.

    Restored: the garbled source gave every parameter the same name (a SyntaxError) and
    bound `__init__`'s values to locals instead of the `self.*` attributes that
    `preprocess` reads.
    """

    # NOTE(review): restored from the conventional image-processor attribute name; the
    # garbled identifier made the declaration unreachable — confirm upstream.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_value: float = 1 / 255,
        do_normalize: bool = True,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Resize `image` to exactly ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        # Bare `resize` resolves to the module-level transform helper.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Rescale the pixel values of `image` by `scale`."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Normalize `image` channel-wise with the given `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        """
        Prepare one image or a batch of images for the model; when OCR is enabled, the
        returned `BatchFeature` also carries per-image ``words`` and ``boxes``.

        Every argument falls back to the corresponding processor attribute when ``None``.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.'
            )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('If do_normalize is True, image_mean and image_std must be specified.')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, 'pytesseract')
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors)

        if apply_ocr:
            data['words'] = words_batch
            data['boxes'] = boxes_batch
        return data
| 347 | 1 |
'''simple docstring'''
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """
    Count positive n-digit integers that are also an nth power (Project Euler 63).

    Restored names: the generator already read `powers` and `bases` (never bound by the
    garbled duplicate parameters), and the __main__ guard calls `solution`.
    """
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )


if __name__ == "__main__":
    print(f"{solution(10, 22) = }")
| 156 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __lowerCAmelCase ( unittest.TestCase ):
    def test_accelerated_optimizer_pickling(self):
        """An optimizer wrapped by `accelerator.prepare` must remain picklable."""
        # NOTE(review): method renamed with a `test_` prefix so unittest actually
        # discovers it; the garbled name was never collected.
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        # Fix: the garbled code prepared and pickled an undefined name instead of the optimizer.
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        # Reset global accelerator state so later tests start clean.
        AcceleratorState._reset_state()
| 156 | 1 |
def binary_insertion_sort(collection: list) -> list:
    """
    Sort `collection` in place (ascending) using insertion sort with a binary search for
    each insertion point, and return the same list.

    Restored: the garbled version collapsed every local to one name and iterated
    ``range(1, collection)``, which raises TypeError; the __main__ guard calls
    `binary_insertion_sort`.
    """
    n = len(collection)
    for i in range(1, n):
        value = collection[i]
        low = 0
        high = i - 1
        # Binary search for the insertion index of `value` within the sorted prefix [0, i).
        while low <= high:
            mid = (low + high) // 2
            if value < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift elements right to open the slot, then insert.
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = value
    return collection
if __name__ == "__main__":
    # Restored names: the list comprehension reads `user_input` and the print reads
    # `unsorted`, but the garbled code bound both to throwaway identifiers.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
| 542 |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def snake_case__ ( lowerCamelCase_ ):
    # NOTE(review): presumably a pytest test taking the `mockfs` fixture (parameter unused
    # beyond triggering it) — confirm against upstream. The garbled name `snake_case__` is
    # reused by every test below, so only the last definition survives collection; restore
    # distinct `test_*` names upstream.
    # Registering the mock fixture should put "mock" in fsspec's registry; "bz2" is built in.
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry
def snake_case__ ( ):
    # NOTE(review): counterpart of the test above — without the mock fixture, "mock" must
    # be absent from fsspec's registry while the built-in "bz2" remains. This definition
    # shadows the previous one because of the duplicated garbled name.
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry
def snake_case__ ( ):
    """`extract_path_from_uri` strips the s3 scheme but leaves local paths untouched."""
    # Restored locals: the asserts already read `dataset_path`/`new_dataset_path` and the
    # f-string reads `mock_bucket`, none of which the garbled code bound.
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path
def snake_case__ ( lowerCamelCase_ ):
    """`is_remote_filesystem` is True for the remote fixture and False for the local fs."""
    # Parameter name kept: pytest matches fixtures by name and the original is unknown here.
    is_remote = is_remote_filesystem(lowerCamelCase_)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    # Fix: the local filesystem object must be tested, not the remote fixture again.
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def snake_case__ ( compression_fs_class, gz_file, bza_file, lza_file, zstd_file, xz_file, text_file ):
    """Each compression filesystem exposes its single member file with the same content."""
    # Restored: the parametrize argument was an undefined name (COMPRESSION_FILESYSTEMS is
    # imported above and otherwise unused), and the duplicated garbled parameters were a
    # SyntaxError. Fixture names are taken from the body's dict; `text_file` is assumed
    # for the expected-content fixture — confirm against conftest.
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def snake_case__ ( protocol, zip_jsonl_path, jsonl_gz_path ):
    """A chained fsspec URL exposes the archive member as a regular file."""
    # Restored: the duplicated garbled parameters were a SyntaxError; names come from the
    # parametrize id and the body's dict values.
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def snake_case__ ( hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file ):
    """HfFileSystem lists and reads the files of a (private) Hub dataset repo."""
    # Restored: the duplicated garbled parameters were a SyntaxError. `hf_api` is grounded
    # by the body; the other fixture names are assumed from the upstream suite — confirm
    # against conftest.
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()
def snake_case__ ( ):
    """Re-registering an existing protocol triggers exactly one overwrite warning on reload."""
    # Restored locals: the f-string below reads `protocol`, which the garbled code never
    # bound; `register_implementation`/`pytest.warns` arguments are assumed from the
    # registration API (clobber + warning category) — confirm upstream.
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
| 542 | 1 |
from __future__ import annotations
from math import pi, sqrt
def _lowerCAmelCase ( __lowerCamelCase : float , __lowerCamelCase : float ):
"""simple docstring"""
if inductance <= 0:
raise ValueError("Inductance cannot be 0 or negative" )
elif capacitance <= 0:
raise ValueError("Capacitance cannot be 0 or negative" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 447 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
# Module-level logger used by the HANS dataset/feature-conversion code below.
_lowerCamelCase = logging.getLogger(__name__)
# NOTE(review): corrupted InputExample-style container — the field names and type
# annotations were lost (every field reads ``lowerCAmelCase = 42``/``None``) and
# ``UpperCamelCase`` in the decorator is unbound. Presumably fields were
# guid/text_a/text_b/label/pairID — TODO restore from upstream.
@dataclass(frozen=UpperCamelCase )
class _SCREAMING_SNAKE_CASE :
    lowerCAmelCase = 42
    lowerCAmelCase = 42
    lowerCAmelCase = None
    lowerCAmelCase = None
    lowerCAmelCase = None
# NOTE(review): corrupted InputFeatures-style container — field names/annotations
# lost, ``UpperCamelCase`` unbound, and the class name collides with the dataclass
# defined just above (this definition shadows it). TODO restore original names.
@dataclass(frozen=UpperCamelCase )
class _SCREAMING_SNAKE_CASE :
    lowerCAmelCase = 42
    lowerCAmelCase = None
    lowerCAmelCase = None
    lowerCAmelCase = None
    lowerCAmelCase = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
# NOTE(review): corrupted torch Dataset for the HANS task. The ``__init__`` signature
# repeats the parameter name ``UpperCamelCase`` (SyntaxError) and every
# ``self.attr = ...`` assignment degraded to a throwaway local
# (``__SCREAMING_SNAKE_CASE = ...``), so ``self.features``/``self.label_list`` read by
# the methods below are never set. Kept byte-identical pending reconstruction.
class _SCREAMING_SNAKE_CASE (UpperCamelCase ):
    lowerCAmelCase = 42
    def __init__( self : str , UpperCamelCase : str , UpperCamelCase : PreTrainedTokenizer , UpperCamelCase : str , UpperCamelCase : Optional[int] = None , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : bool = False , )->List[str]:
        __SCREAMING_SNAKE_CASE : int = hans_processors[task]()
        # Cache file name encodes split, tokenizer class, max length and task.
        __SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(
            UpperCamelCase , "cached_{}_{}_{}_{}".format(
                "dev" if evaluate else "train" , tokenizer.__class__.__name__ , str(UpperCamelCase ) , UpperCamelCase , ) , )
        __SCREAMING_SNAKE_CASE : Optional[Any] = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = label_list[2], label_list[1]
        __SCREAMING_SNAKE_CASE : Any = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        __SCREAMING_SNAKE_CASE : str = cached_features_file + ".lock"
        with FileLock(UpperCamelCase ):
            if os.path.exists(UpperCamelCase ) and not overwrite_cache:
                logger.info(F"""Loading features from cached file {cached_features_file}""" )
                __SCREAMING_SNAKE_CASE : Optional[Any] = torch.load(UpperCamelCase )
            else:
                logger.info(F"""Creating features from dataset file at {data_dir}""" )
                __SCREAMING_SNAKE_CASE : Union[str, Any] = (
                    processor.get_dev_examples(UpperCamelCase ) if evaluate else processor.get_train_examples(UpperCamelCase )
                )
                logger.info("Training examples: %s" , len(UpperCamelCase ) )
                __SCREAMING_SNAKE_CASE : Any = hans_convert_examples_to_features(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
                logger.info("Saving features into cached file %s" , UpperCamelCase )
                torch.save(self.features , UpperCamelCase )
    def __len__( self : Tuple )->Any:
        return len(self.features )
    def __getitem__( self : Union[str, Any] , UpperCamelCase : int )->InputFeatures:
        # NOTE(review): returns ``self.features[i]`` but the index parameter name was
        # obfuscated away — ``i`` is unbound here. TODO confirm original parameter name.
        return self.features[i]
    def __snake_case ( self : int )->Tuple:
        return self.label_list
if is_tf_available():
import tensorflow as tf
# NOTE(review): corrupted TensorFlow HANS dataset wrapper — same degradation as the
# torch variant above: duplicate ``UpperCamelCase`` parameters (SyntaxError) and
# ``self.`` prefixes lost, so ``self.features``/``self.dataset``/``self.label_list``
# are never assigned. Kept byte-identical pending reconstruction.
class _SCREAMING_SNAKE_CASE :
    lowerCAmelCase = 42
    def __init__( self : Dict , UpperCamelCase : str , UpperCamelCase : PreTrainedTokenizer , UpperCamelCase : str , UpperCamelCase : Optional[int] = 1_2_8 , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : bool = False , )->str:
        __SCREAMING_SNAKE_CASE : str = hans_processors[task]()
        __SCREAMING_SNAKE_CASE : Union[str, Any] = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[str] = label_list[2], label_list[1]
        __SCREAMING_SNAKE_CASE : Union[str, Any] = label_list
        __SCREAMING_SNAKE_CASE : str = processor.get_dev_examples(UpperCamelCase ) if evaluate else processor.get_train_examples(UpperCamelCase )
        __SCREAMING_SNAKE_CASE : str = hans_convert_examples_to_features(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
        # Generator feeding tf.data.Dataset.from_generator below.
        def gen():
            for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="convert examples to features" ):
                if ex_index % 1_0_0_0_0 == 0:
                    logger.info("Writing example %d of %d" % (ex_index, len(UpperCamelCase )) )
                yield (
                    {
                        "example_id": 0,
                        "input_ids": ex.input_ids,
                        "attention_mask": ex.attention_mask,
                        "token_type_ids": ex.token_type_ids,
                    },
                    ex.label,
                )
        # NOTE(review): ``tf.intaa`` looks like a mangled ``tf.int32``/``tf.int64`` — TODO confirm.
        __SCREAMING_SNAKE_CASE : List[str] = tf.data.Dataset.from_generator(
            UpperCamelCase , (
                {
                    "example_id": tf.intaa,
                    "input_ids": tf.intaa,
                    "attention_mask": tf.intaa,
                    "token_type_ids": tf.intaa,
                },
                tf.intaa,
            ) , (
                {
                    "example_id": tf.TensorShape([] ),
                    "input_ids": tf.TensorShape([None, None] ),
                    "attention_mask": tf.TensorShape([None, None] ),
                    "token_type_ids": tf.TensorShape([None, None] ),
                },
                tf.TensorShape([] ),
            ) , )
    def __snake_case ( self : List[Any] )->str:
        return self.dataset
    def __len__( self : Tuple )->List[str]:
        return len(self.features )
    def __getitem__( self : Optional[Any] , UpperCamelCase : Tuple )->InputFeatures:
        # NOTE(review): ``i`` is unbound — index parameter name was obfuscated away.
        return self.features[i]
    def __snake_case ( self : List[Any] )->Optional[int]:
        return self.label_list
# NOTE(review): HANS data processor (train/dev TSV readers + example builder).
# Corrupted like its siblings: assignment targets inside ``_create_examples`` lost
# their names, so ``examples``/``set_type`` and the InputExample keyword values are
# unbound at runtime. Kept byte-identical pending reconstruction.
class _SCREAMING_SNAKE_CASE (UpperCamelCase ):
    def __snake_case ( self : List[Any] , UpperCamelCase : Union[str, Any] )->Tuple:
        # Train split reader.
        return self._create_examples(self._read_tsv(os.path.join(UpperCamelCase , "heuristics_train_set.txt" ) ) , "train" )
    def __snake_case ( self : List[str] , UpperCamelCase : Optional[int] )->Any:
        # Dev split reader.
        return self._create_examples(self._read_tsv(os.path.join(UpperCamelCase , "heuristics_evaluation_set.txt" ) ) , "dev" )
    def __snake_case ( self : Optional[int] )->Tuple:
        # NLI label set used by HANS.
        return ["contradiction", "entailment", "neutral"]
    def __snake_case ( self : List[str] , UpperCamelCase : str , UpperCamelCase : Any )->Tuple:
        __SCREAMING_SNAKE_CASE : Optional[int] = []
        for i, line in enumerate(UpperCamelCase ):
            if i == 0:
                continue
            __SCREAMING_SNAKE_CASE : str = "%s-%s" % (set_type, line[0])
            __SCREAMING_SNAKE_CASE : List[str] = line[5]
            __SCREAMING_SNAKE_CASE : List[str] = line[6]
            __SCREAMING_SNAKE_CASE : Optional[Any] = line[7][2:] if line[7].startswith("ex" ) else line[7]
            __SCREAMING_SNAKE_CASE : Optional[Any] = line[0]
            examples.append(InputExample(guid=UpperCamelCase , text_a=UpperCamelCase , text_b=UpperCamelCase , label=UpperCamelCase , pairID=UpperCamelCase ) )
        return examples
def _lowerCAmelCase ( __lowerCamelCase : List[InputExample] , __lowerCamelCase : List[str] , __lowerCamelCase : int , __lowerCamelCase : PreTrainedTokenizer , ):
    """Convert HANS InputExamples into tokenized InputFeatures.

    NOTE(review): corrupted — all four parameters share the name ``__lowerCamelCase``
    (SyntaxError) and assignment targets (``label_map``, ``features``, the tokenizer
    output dict) were replaced by throwaway locals, so the names read later are
    unbound. Kept byte-identical pending reconstruction.
    """
    __SCREAMING_SNAKE_CASE : str = {label: i for i, label in enumerate(__lowerCamelCase )}
    __SCREAMING_SNAKE_CASE : Any = []
    for ex_index, example in tqdm.tqdm(enumerate(__lowerCamelCase ) , desc="convert examples to features" ):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index) )
        __SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(
            example.text_a , example.text_b , add_special_tokens=__lowerCamelCase , max_length=__lowerCamelCase , padding="max_length" , truncation=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , )
        # Unseen labels fall back to index 0.
        __SCREAMING_SNAKE_CASE : List[str] = label_map[example.label] if example.label in label_map else 0
        __SCREAMING_SNAKE_CASE : Union[str, Any] = int(example.pairID )
        features.append(InputFeatures(**__lowerCamelCase , label=__lowerCamelCase , pairID=__lowerCamelCase ) )
    # Log the first few converted examples for sanity checking.
    for i, example in enumerate(examples[:5] ):
        logger.info("*** Example ***" )
        logger.info(F"""guid: {example}""" )
        logger.info(F"""features: {features[i]}""" )
    return features
# NOTE(review): both dicts are bound to the same obfuscated name, so the second
# (task -> processor class) clobbers the first (task -> num labels). Presumably the
# originals were ``hans_tasks_num_labels`` and ``hans_processors`` — TODO restore.
_lowerCamelCase = {
    """hans""": 3,
}
_lowerCamelCase = {
    """hans""": HansProcessor,
}
| 447 | 1 |
def lowerCAmelCase_ ( arr , n , r , index , data , i ):
    """Recursive helper: fill ``data[0:r]`` with every size-``r`` combination of
    ``arr[0:n]`` and print each completed combination (space-separated).

    Fixes vs. the corrupted original: distinct parameter names (the signature
    repeated one name six times — SyntaxError) and the lost ``data[index] = arr[i]``
    assignment target restored.
    """
    if index == r:
        for j in range(r ):
            print(data[j] , end=" " )
        print(" " )
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr , n , r , index + 1 , data , i + 1 )
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr , n , r , index , data , i + 1 )


# Alias so the recursive calls above (and the driver below) keep working even
# though the next definition rebinds ``lowerCAmelCase_``.
combination_util = lowerCAmelCase_


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def lowerCAmelCase_ ( arr , n , r ):
    """Print all size-``r`` combinations of ``arr[0:n]``."""
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr , n , r , 0 , data , 0 )


print_combination = lowerCAmelCase_  # name used by the __main__ driver

if __name__ == "__main__":
    # Driver code to check the function above
    _snake_case = [10, 20, 30, 40, 50]
    print_combination(_snake_case, len(_snake_case), 3)
    # This code is contributed by Ambuj sahu
| 307 |
def lowerCAmelCase_ ( x_points , y_points , xa ):
    """Neville's iterated interpolation.

    Interpolates (and extrapolates) the polynomial through
    ``(x_points[i], y_points[i])`` at abscissa ``xa``.

    Fixes vs. the corrupted original: distinct parameter names (the signature
    repeated one name three times — SyntaxError), the lost ``q[i][1] = ...``
    assignment target, and the lost ``range(2, n)`` / ``range(i, n)`` loop bounds.

    Returns:
        ``[value, q]`` where ``value`` is the interpolated value and ``q`` the
        full Neville tableau (column 0 is unused padding).
    """
    n = len(x_points )
    q = [[0] * n for i in range(n )]
    # Seed column 1 with the known ordinates.
    for i in range(n ):
        q[i][1] = y_points[i]

    for i in range(2 , n ):
        for j in range(i , n ):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 307 | 1 |
'''simple docstring'''
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
# NOTE(review): unittest suite for transformers' logging verbosity utilities.
# Corrupted — assignment targets became throwaway locals and every method-internal
# reference was replaced by the unbound name ``snake_case_`` (NameError at runtime).
# Kept byte-identical pending reconstruction; comments mark the apparent intent.
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''simple docstring'''
    def _UpperCamelCase ( self ):
        '''Verbosity setters should be reflected by the root library logger level.'''
        UpperCAmelCase_ : str = logging.get_logger()
        # the current default level is logging.WARNING
        UpperCAmelCase_ : Union[str, Any] = logging.get_verbosity()
        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        # restore to the original level
        logging.set_verbosity(snake_case_ )
    def _UpperCamelCase ( self ):
        '''Warnings should be captured or suppressed according to verbosity.'''
        UpperCAmelCase_ : Optional[int] = logging.get_verbosity()
        UpperCAmelCase_ : List[str] = logging.get_logger('transformers.models.bart.tokenization_bart' )
        UpperCAmelCase_ : int = 'Testing 1, 2, 3'
        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(snake_case_ ) as cl:
                logger.warning(snake_case_ )
            self.assertEqual(cl.out , msg + '\n' )
        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()
        # should not be able to log warnings
        with CaptureLogger(snake_case_ ) as cl:
            logger.warning(snake_case_ )
        self.assertEqual(cl.out , '' )
        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(snake_case_ ) as cl:
            logger.warning(snake_case_ )
        self.assertEqual(cl.out , msg + '\n' )
        # restore to the original level
        logging.set_verbosity(snake_case_ )
    @mockenv(TRANSFORMERS_VERBOSITY='error' )
    def _UpperCamelCase ( self ):
        '''TRANSFORMERS_VERBOSITY env var should drive the internal verbosity.'''
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        UpperCAmelCase_ : int = logging.get_logger('transformers.models.bart.tokenization_bart' )
        UpperCAmelCase_ : Optional[Any] = os.getenv('TRANSFORMERS_VERBOSITY' , snake_case_ )
        UpperCAmelCase_ : List[Any] = logging.log_levels[env_level_str]
        UpperCAmelCase_ : Dict = logging.get_verbosity()
        self.assertEqual(
            snake_case_ , snake_case_ , F'''TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}''' , )
        # restore to the original level
        UpperCAmelCase_ : Tuple = ''
        transformers.utils.logging._reset_library_root_logger()
    @mockenv(TRANSFORMERS_VERBOSITY='super-error' )
    def _UpperCamelCase ( self ):
        '''An unknown TRANSFORMERS_VERBOSITY value should be reported, not crash.'''
        transformers.utils.logging._reset_library_root_logger()
        UpperCAmelCase_ : Tuple = logging.logging.getLogger()
        with CaptureLogger(snake_case_ ) as cl:
            # this action activates the env var
            logging.get_logger('transformers.models.bart.tokenization_bart' )
        self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error' , cl.out )
        # no need to restore as nothing was changed
    def _UpperCamelCase ( self ):
        '''TRANSFORMERS_NO_ADVISORY_WARNINGS should silence warning_advice only.'''
        transformers.utils.logging._reset_library_root_logger()
        UpperCAmelCase_ : int = logging.get_logger('transformers.models.bart.tokenization_bart' )
        UpperCAmelCase_ : List[Any] = 'Testing 1, 2, 3'
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1' ):
            # nothing should be logged as env var disables this method
            with CaptureLogger(snake_case_ ) as cl:
                logger.warning_advice(snake_case_ )
            self.assertEqual(cl.out , '' )
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='' ):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(snake_case_ ) as cl:
                logger.warning_advice(snake_case_ )
            self.assertEqual(cl.out , msg + '\n' )
def _lowerCamelCase ( ):
    """Round-trip check: disabling the huggingface_hub progress bars must be
    observable via are_progress_bars_disabled(), and re-enabling must clear it."""
    disable_progress_bar()
    assert are_progress_bars_disabled()
    enable_progress_bar()
    assert not are_progress_bars_disabled()
| 715 | '''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
# Slow integration test: FlaxXLMRobertaModel must reproduce a known hidden-state
# slice for a fixed sentence.
@require_sentencepiece
@require_tokenizers
@require_flax
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''simple docstring'''
    @slow
    def _UpperCamelCase ( self ):
        '''Check output shape and a last-dim slice against golden values.'''
        UpperCAmelCase_ : Any = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
        UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained('xlm-roberta-base' )
        UpperCAmelCase_ : Tuple = 'The dog is cute and lives in the garden house'
        UpperCAmelCase_ : Dict = jnp.array([tokenizer.encode(snake_case_ )] )
        # NOTE(review): ``snake_case_`` references below are corrupted (unbound names);
        # presumably they once threaded text/input_ids/expected values — TODO restore.
        UpperCAmelCase_ : str = (1, 1_2, 7_6_8) # batch_size, sequence_length, embedding_vector_dim
        UpperCAmelCase_ : List[str] = jnp.array(
            [[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] )
        UpperCAmelCase_ : str = model(snake_case_ )['last_hidden_state']
        self.assertEqual(output.shape , snake_case_ )
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1] , snake_case_ , atol=1E-3 ) )
| 389 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both module constants are bound to the same obfuscated name, so the
# pretrained-config map clobbers the logger. Presumably ``logger`` and
# ``ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP`` originally — TODO restore.
lowercase = logging.get_logger(__name__)
lowercase = {
    """weiweishi/roc-bert-base-zh""": """https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json""",
}
# NOTE(review): RoC-BERT configuration class. Corrupted — every ``self.attr = value``
# assignment degraded to a local (``UpperCamelCase__ = ...``), so the config stores
# nothing; ``classifier_dropout`` and ``_A`` in ``super().__init__`` are unbound
# (NameError on instantiation). Kept byte-identical pending reconstruction.
class __lowercase ( lowerCAmelCase__ ):
    '''simple docstring'''
    _A : Union[str, Any] = '''roc_bert'''
    def __init__( self : Tuple , _a : List[Any]=30_522 , _a : Optional[int]=768 , _a : List[str]=12 , _a : str=12 , _a : Tuple=3_072 , _a : int="gelu" , _a : Optional[Any]=0.1 , _a : Any=0.1 , _a : Optional[int]=512 , _a : Optional[int]=2 , _a : List[str]=0.02 , _a : Optional[Any]=1E-12 , _a : Tuple=True , _a : Union[str, Any]=0 , _a : Any="absolute" , _a : str=None , _a : int=True , _a : str=True , _a : List[Any]=768 , _a : Any=910 , _a : str=512 , _a : Union[str, Any]=24_858 , _a : Tuple=True , **_a : List[str] , ):
        UpperCamelCase__ = vocab_size
        UpperCamelCase__ = max_position_embeddings
        UpperCamelCase__ = hidden_size
        UpperCamelCase__ = num_hidden_layers
        UpperCamelCase__ = num_attention_heads
        UpperCamelCase__ = intermediate_size
        UpperCamelCase__ = hidden_act
        UpperCamelCase__ = hidden_dropout_prob
        UpperCamelCase__ = attention_probs_dropout_prob
        UpperCamelCase__ = initializer_range
        UpperCamelCase__ = type_vocab_size
        UpperCamelCase__ = layer_norm_eps
        UpperCamelCase__ = use_cache
        UpperCamelCase__ = enable_pronunciation
        UpperCamelCase__ = enable_shape
        UpperCamelCase__ = pronunciation_embed_dim
        UpperCamelCase__ = pronunciation_vocab_size
        UpperCamelCase__ = shape_embed_dim
        UpperCamelCase__ = shape_vocab_size
        UpperCamelCase__ = concat_input
        UpperCamelCase__ = position_embedding_type
        UpperCamelCase__ = classifier_dropout
        super().__init__(pad_token_id=_A , **_A )
| 240 |
from math import isclose, sqrt
def a__ ( point_x , point_y , incoming_gradient ):
    """Reflect a ray at (point_x, point_y) off the ellipse 4x^2 + y^2 = 100.

    Returns (next_x, next_y, outgoing_gradient): the next intersection point of
    the reflected ray with the ellipse and the reflected ray's gradient.

    Fixes vs. the corrupted original: distinct parameter names (the signature
    repeated one name three times — SyntaxError) and the lost ``isclose`` operands.
    """
    # Gradient of the normal at the point (tangent gradient of the ellipse is -4x/y).
    normal_gradient = point_y / 4 / point_x
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaeneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus , point_x ) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient


# Alias so the solution function below (and callers) can still reach this helper
# after the next definition rebinds ``a__``.
next_point = a__


def a__ ( first_x_coord = 1.4 , first_y_coord = -9.6 ):
    """Project Euler 144: count internal reflections of a laser beam inside the
    white cell 4x^2 + y^2 = 100 before it exits through the top slot
    (-0.01 <= x <= 0.01, y > 0). The beam enters at (0, 10.1) and first hits
    (first_x_coord, first_y_coord)."""
    num_reflections = 0
    point_x = first_x_coord
    point_y = first_y_coord
    # Gradient of the incoming beam from the entry slot (0, 10.1).
    gradient = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x , point_y , gradient )
        num_reflections += 1
    return num_reflections


solution = a__  # name used by the __main__ guard below

if __name__ == "__main__":
    print(f'''{solution() = }''')
| 74 | 0 |
# Demo adjacency list used by the BFS helpers and the __main__ driver below.
# NOTE(review): the driver refers to it as ``demo_graph`` but the binding here is
# the obfuscated ``snake_case_`` — TODO restore the original name.
snake_case_ : int = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}
def A (graph : dict , start , goal ) -> list[str]:
    """Breadth-first search: return the first (hence shortest, in edges) path from
    ``start`` to ``goal`` as a list of vertices, or ``[]`` if no path exists.

    Fix vs. the corrupted original: distinct parameter names (the signature
    repeated ``__A`` three times — SyntaxError); body names were already
    ``graph``/``start``/``goal``.
    """
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0 )
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path )
                new_path.append(neighbour )
                queue.append(new_path )
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node )
    # in case there's no path between the 2 nodes
    return []
def A (graph : dict , start , target ) -> int:
    """Breadth-first search: return the shortest distance (number of edges) from
    ``start`` to ``target``, or ``-1`` if the graph is empty, a vertex is missing,
    or the target is unreachable.

    Fix vs. the corrupted original: distinct parameter names (the signature
    repeated ``__A`` three times — SyntaxError); body names were already
    ``graph``/``start``/``target``.
    """
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start )
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0 )
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent )
                queue.append(adjacent )
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
    # NOTE(review): corrupted driver — ``bfs_shortest_path``, ``bfs_shortest_path_distance``
    # and ``demo_graph`` are the original (pre-obfuscation) names and are unbound here;
    # the functions above are both named ``A`` and the graph is ``snake_case_``.
    print(bfs_shortest_path(demo_graph, "G", "D")) # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D")) # returns 4
| 700 |
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
    # Launch a transformers example script on remote hardware via runhouse.
    # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
    # setup instructions, if using on-demand hardware
    # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
    # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
    # Throw an error if user passes both BYO and on-demand cluster args
    # Otherwise, use default values
    snake_case_ : Optional[Any] = argparse.ArgumentParser()
    parser.add_argument("--user", type=str, default="ubuntu")
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--key_path", type=str, default=None)
    parser.add_argument("--instance", type=str, default="V100:1")
    parser.add_argument("--provider", type=str, default="cheapest")
    parser.add_argument("--use_spot", type=bool, default=False)
    parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    snake_case_ , snake_case_ : Tuple = parser.parse_known_args()
    # NOTE(review): corrupted — the parser/args/cluster results are all bound to
    # ``snake_case_`` while later lines read ``parser``/``args``/``cluster`` (unbound).
    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError("Cannot specify both BYO and on-demand cluster args")
        snake_case_ : List[str] = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
    else:
        snake_case_ : Tuple = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    snake_case_ : int = args.example.rsplit("/", 1)[0]
    # Set up remote environment
    cluster.install_packages(["pip:./"]) # Installs transformers from local source
    # Note transformers is copied into the home directory on the remote machine, so we can install from there
    cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"])
    cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
    # Run example. You can bypass the CLI wrapper and paste your own code here.
    cluster.run([f"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])
    # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
    # from my_script... import train
    # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
    # launch_train_gpu = rh.function(fn=train,
    #                                system=gpu,
    #                                reqs=reqs,
    #                                name='train_bert_glue')
    #
    # We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
    #                  stream_logs=True)
| 169 | 0 |
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
# Model tester for the Audio Spectrogram Transformer (AST): builds tiny configs and
# inputs for the common model tests. Patch geometry: the number of patches is
# derived from frequency/time strides over (num_mel_bins, max_length); +2 for the
# [CLS] and distillation tokens.
class UpperCAmelCase :
    '''simple docstring'''
    def __init__( self , lowercase__ , lowercase__=13 , lowercase__=2 , lowercase__=24 , lowercase__=16 , lowercase__=True , lowercase__=True , lowercase__=32 , lowercase__=5 , lowercase__=4 , lowercase__=37 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=10 , lowercase__=0.0_2 , lowercase__=None , lowercase__=2 , lowercase__=2 , ) -> Tuple:
        # NOTE(review): corrupted — all keyword parameters share the name
        # ``lowercase__`` (SyntaxError); the right-hand names below are presumably the
        # original parameter names. Kept byte-identical pending reconstruction.
        SCREAMING_SNAKE_CASE : int = parent
        SCREAMING_SNAKE_CASE : Optional[int] = batch_size
        SCREAMING_SNAKE_CASE : int = patch_size
        SCREAMING_SNAKE_CASE : Tuple = max_length
        SCREAMING_SNAKE_CASE : Any = num_mel_bins
        SCREAMING_SNAKE_CASE : str = is_training
        SCREAMING_SNAKE_CASE : str = use_labels
        SCREAMING_SNAKE_CASE : int = hidden_size
        SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
        SCREAMING_SNAKE_CASE : int = num_attention_heads
        SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size
        SCREAMING_SNAKE_CASE : Dict = hidden_act
        SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
        SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE : str = type_sequence_label_size
        SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
        SCREAMING_SNAKE_CASE : Union[str, Any] = scope
        SCREAMING_SNAKE_CASE : str = frequency_stride
        SCREAMING_SNAKE_CASE : Tuple = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        SCREAMING_SNAKE_CASE : List[Any] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        SCREAMING_SNAKE_CASE : str = (self.max_length - self.patch_size) // self.time_stride + 1
        SCREAMING_SNAKE_CASE : Optional[Any] = frequency_out_dimension * time_out_dimension
        SCREAMING_SNAKE_CASE : int = num_patches + 2
    def _UpperCamelCase ( self ) -> int:
        # Build random input spectrograms (and labels when use_labels) plus a config.
        SCREAMING_SNAKE_CASE : int = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
        SCREAMING_SNAKE_CASE : List[Any] = None
        if self.use_labels:
            SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        SCREAMING_SNAKE_CASE : int = self.get_config()
        return config, input_values, labels
    def _UpperCamelCase ( self ) -> int:
        return ASTConfig(
            patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
    def _UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ ) -> Optional[Any]:
        # Run a forward pass and check the last_hidden_state shape.
        SCREAMING_SNAKE_CASE : int = ASTModel(config=_UpperCamelCase )
        model.to(_UpperCamelCase )
        model.eval()
        SCREAMING_SNAKE_CASE : str = model(_UpperCamelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def _UpperCamelCase ( self ) -> Any:
        SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs()
        (
            (
                SCREAMING_SNAKE_CASE
            ) , (
                SCREAMING_SNAKE_CASE
            ) , (
                SCREAMING_SNAKE_CASE
            ) ,
        ) : Union[str, Any] = config_and_inputs
        SCREAMING_SNAKE_CASE : str = {'input_values': input_values}
        return config, inputs_dict
# Common model tests for AST (shape/signature/config checks + slow pretrained load).
@require_torch
class UpperCAmelCase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
    '''simple docstring'''
    snake_case__ : Optional[Any] = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    snake_case__ : List[Any] = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    # NOTE(review): the four flags below all share one obfuscated name; presumably
    # fx_compatible / test_pruning / test_resize_embeddings / test_head_masking.
    snake_case__ : List[str] = False
    snake_case__ : List[Any] = False
    snake_case__ : Tuple = False
    snake_case__ : List[Any] = False
    def _UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Union[str, Any]:
        # Skip the audio-classification pipeline test case.
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False
    def _UpperCamelCase ( self ) -> List[str]:
        SCREAMING_SNAKE_CASE : Optional[Any] = ASTModelTester(self )
        SCREAMING_SNAKE_CASE : Union[str, Any] = ConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase , hidden_size=37 )
    def _UpperCamelCase ( self ) -> int:
        self.config_tester.run_common_tests()
    @unittest.skip(reason='AST does not use inputs_embeds' )
    def _UpperCamelCase ( self ) -> List[Any]:
        pass
    def _UpperCamelCase ( self ) -> str:
        # Input embeddings should be a Module; output embeddings absent or Linear.
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE : Any = model_class(_UpperCamelCase )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            SCREAMING_SNAKE_CASE : Dict = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(_UpperCamelCase , nn.Linear ) )
    def _UpperCamelCase ( self ) -> List[str]:
        # Forward signature must start with 'input_values'.
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE : List[str] = model_class(_UpperCamelCase )
            SCREAMING_SNAKE_CASE : Optional[Any] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            SCREAMING_SNAKE_CASE : int = [*signature.parameters.keys()]
            SCREAMING_SNAKE_CASE : List[str] = ['input_values']
            self.assertListEqual(arg_names[:1] , _UpperCamelCase )
    def _UpperCamelCase ( self ) -> Optional[Any]:
        SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_UpperCamelCase )
    @slow
    def _UpperCamelCase ( self ) -> Tuple:
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            SCREAMING_SNAKE_CASE : Dict = ASTModel.from_pretrained(_UpperCamelCase )
            self.assertIsNotNone(_UpperCamelCase )
def __lowerCAmelCase ( ):
    """Download the sample audio fixture from the Hub and load it with torchaudio.

    Returns:
        (waveform, sampling_rate) as produced by ``torchaudio.load``.

    Fix vs. the corrupted original: the downloaded filepath was assigned to a
    throwaway local and ``torchaudio.load`` was called with an unbound name; the
    variable threading is restored here.
    """
    filepath = hf_hub_download(
        repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' )
    audio, sampling_rate = torchaudio.load(filepath )
    return audio, sampling_rate
# Slow integration test: pretrained AST audio classifier must reproduce golden
# logits on the sample clip.
@require_torch
@require_torchaudio
class UpperCAmelCase ( unittest.TestCase ):
    '''simple docstring'''
    @cached_property
    def _UpperCamelCase ( self ) -> Dict:
        # Feature extractor fixture; None when torchaudio is unavailable.
        return (
            ASTFeatureExtractor.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' )
            if is_torchaudio_available()
            else None
        )
    @slow
    def _UpperCamelCase ( self ) -> Any:
        SCREAMING_SNAKE_CASE : List[Any] = self.default_feature_extractor
        SCREAMING_SNAKE_CASE : List[str] = ASTForAudioClassification.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' ).to(_UpperCamelCase )
        SCREAMING_SNAKE_CASE : int = self.default_feature_extractor
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = prepare_audio()
        SCREAMING_SNAKE_CASE : int = audio.squeeze().numpy()
        SCREAMING_SNAKE_CASE : int = feature_extractor(_UpperCamelCase , sampling_rate=_UpperCamelCase , return_tensors='pt' ).to(_UpperCamelCase )
        # forward pass
        with torch.no_grad():
            SCREAMING_SNAKE_CASE : Any = model(**_UpperCamelCase )
        # verify the logits
        SCREAMING_SNAKE_CASE : str = torch.Size((1, 527) )
        self.assertEqual(outputs.logits.shape , _UpperCamelCase )
        SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] ).to(_UpperCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1E-4 ) )
| 251 |
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase_(graph: dict, start: str) -> set[str]:
    """Iterative depth-first search.

    :param graph: adjacency mapping {vertex: [neighbours]}
    :param start: vertex to start the traversal from
    :return: the set of vertices reachable from ``start``
    """
    # Fix: the original declared two parameters with the same name (SyntaxError)
    # and seeded `explored` with every graph key, which made the result wrong
    # for disconnected graphs.
    explored, stack = set(), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
# Example graph used by the demo run below.
G = {
    """A""": ["""B""", """C""", """D"""],
    """B""": ["""A""", """D""", """E"""],
    """C""": ["""A""", """F"""],
    """D""": ["""B""", """D"""],
    """E""": ["""B""", """F"""],
    """F""": ["""C""", """E""", """G"""],
    """G""": ["""F"""],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fix: the DFS function in this module is `lowerCamelCase_`, and the graph
    # constant was previously bound to a different name than the one used here.
    print(lowerCamelCase_(G, """A"""))
# --- (corrupted table-separator artifact removed; unrelated snippet boundary) ---
'''simple docstring'''
import datasets
from .evaluate import evaluate
# Fix: the three doc constants were all assigned to one name while the class
# decorator reads _DESCRIPTION / _KWARGS_DESCRIPTION; restore distinct names.
_CITATION = '''\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
'''

_DESCRIPTION = '''
This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
'''

_KWARGS_DESCRIPTION = '''
Computes SQuAD scores (F1 and EM).
Args:
    predictions: List of question-answers dictionaries with the following key-values:
        - \'id\': id of the question-answer pair as given in the references (see below)
        - \'prediction_text\': the text of the answer
    references: List of question-answers dictionaries with the following key-values:
        - \'id\': id of the question-answer pair (see above),
        - \'answers\': a Dict in the SQuAD dataset format
            {
                \'text\': list of possible texts for the answer, as a list of strings
                \'answer_start\': list of start positions for the answer, as a list of ints
            }
            Note that answer_start values are not taken into account to compute the metric.
Returns:
    \'exact_match\': Exact match (the normalized answer exactly match the gold answer)
    \'f1\': The F-score of predicted tokens versus the gold answer
Examples:
    >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]
    >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]
    >>> squad_metric = datasets.load_metric("squad")
    >>> results = squad_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {\'exact_match\': 100.0, \'f1\': 100.0}
'''


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class A__(datasets.Metric):
    """SQuAD v1 metric (exact match and token-level F1) wrapping the official script."""

    def _info(self):
        # datasets.Metric dispatches to `_info`/`_compute`; the two methods
        # previously shared one junk name, so neither hook existed.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    """predictions""": {"""id""": datasets.Value("""string"""), """prediction_text""": datasets.Value("""string""")},
                    """references""": {
                        """id""": datasets.Value("""string"""),
                        """answers""": datasets.features.Sequence(
                            {
                                """text""": datasets.Value("""string"""),
                                """answer_start""": datasets.Value("""int32"""),
                            }),
                    },
                }),
            codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""],
            reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""],
        )

    def _compute(self, predictions, references):
        """Score `predictions` against `references` in SQuAD format."""
        # Fix: the original declared both parameters with the same name
        # (SyntaxError) and passed the wrong objects to evaluate().
        pred_dict = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
        dataset = [
            {
                """paragraphs""": [
                    {
                        """qas""": [
                            {
                                """answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
                                """id""": ref["""id"""],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
# --- (corrupted table-separator artifact removed; unrelated snippet boundary) ---
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: maps submodule name -> public symbols it provides.
# Fix: the structure was assigned to a thrice-reassigned junk name while
# `_LazyModule` below reads `_import_structure`.
_import_structure = {
    '''configuration_megatron_bert''': ['''MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegatronBertConfig'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling symbols are only exported when torch is installed.
    _import_structure['''modeling_megatron_bert'''] = [
        '''MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''MegatronBertForCausalLM''',
        '''MegatronBertForMaskedLM''',
        '''MegatronBertForMultipleChoice''',
        '''MegatronBertForNextSentencePrediction''',
        '''MegatronBertForPreTraining''',
        '''MegatronBertForQuestionAnswering''',
        '''MegatronBertForSequenceClassification''',
        '''MegatronBertForTokenClassification''',
        '''MegatronBertModel''',
        '''MegatronBertPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_megatron_bert import (
            MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegatronBertForCausalLM,
            MegatronBertForMaskedLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
            MegatronBertModel,
            MegatronBertPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
# --- (corrupted table-separator artifact removed; unrelated snippet boundary) ---
"""simple docstring"""
def _lowerCamelCase ( UpperCAmelCase_ : int ) -> Dict:
"""simple docstring"""
assert (
isinstance(__snake_case, __snake_case ) and number_of_steps > 0
), F"""number_of_steps needs to be positive integer, your input {number_of_steps}"""
if number_of_steps == 1:
return 1
A__ = 1, 1
for _ in range(number_of_steps - 1 ):
A__ = current + previous, current
return current
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
# --- (corrupted table-separator artifact removed; unrelated snippet boundary) ---
"""simple docstring"""
def count_inversions_bf(arr):
    """Count inversions by brute force in O(n^2).

    An inversion is a pair (i, j) with i < j and arr[i] > arr[j].
    """
    # Fix: all four functions in this module were defined under one colliding
    # name while every call site used the proper names restored here.
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """Count inversions via merge sort in O(n log n).

    :return: (sorted copy of arr, number of inversions)
    """
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    """Merge the sorted runs p and q, counting pairs that cross between them."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P).
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    """Sanity-check both counters against known answers and print the results."""
    arr_a = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 8
    print('''number of inversions = ''', num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_a.sort()
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 0
    print('''number of inversions = ''', num_inversions_bf)

    # an empty list should also have zero inversions
    arr_a = []
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 0
    print('''number of inversions = ''', num_inversions_bf)


if __name__ == "__main__":
    main()
# --- (corrupted table-separator artifact removed; unrelated snippet boundary) ---
"""simple docstring"""
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
# Fix: all three module constants were assigned to one colliding name while
# the code below reads `logger`, `MODEL_CONFIG_CLASSES` and `MODEL_TYPES`.
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we fine-tune or train from scratch."""

    # NOTE(review): every field name and default below was corrupted to one
    # repeated junk token; names/defaults are restored to match how `main`
    # reads them — verify against the upstream run_mlm_wwm example.
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            'help': (
                'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            'help': (
                'Override some existing default config settings when a model is trained from scratch. Example: '
                'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'},
    )
    model_revision: str = field(
        default='main',
        metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        },
    )

    def __post_init__(self):
        # --config_overrides only makes sense when training from scratch.
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                '--config_overrides can\'t be used in combination with --config_name or --model_name_or_path'
            )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to the data used for training and evaluation."""

    dataset_name: Optional[str] = field(
        default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}
    )
    train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a text file).'})
    validation_file: Optional[str] = field(
        default=None,
        metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={'help': 'An optional input train ref data file for whole word masking in Chinese.'},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={'help': 'An optional input validation ref data file for whole word masking in Chinese.'},
    )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated. Default to the max input length of the model.'
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={'help': 'The number of processes to use for the preprocessing.'},
    )
    mlm_probability: float = field(
        default=0.15, metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            'help': (
                'Whether to pad all samples to `max_seq_length`. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
            )
        },
    )

    def __post_init__(self):
        # Only csv/json/txt inputs are supported by the loaders below.
        if self.train_file is not None:
            extension = self.train_file.split('.')[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split('.')[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."


def add_chinese_references(dataset, ref_file):
    """Attach a `chinese_ref` column (one JSON record per example) to `dataset`."""
    with open(ref_file, 'r', encoding='utf-8') as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict['chinese_ref'] = refs
    return Dataset.from_dict(dataset_dict)


def main():
    """Whole-word-masking MLM training entry point (argument parsing, data prep, train, eval)."""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f'Output directory ({training_args.output_dir}) already exists and is not empty. '
                'Use --overwrite_output_dir to overcome.'
            )
        elif last_checkpoint is not None:
            logger.info(
                f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.'
            )

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f'distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}'
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s', training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets['validation'] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f'train[:{data_args.validation_split_percentage}%]',
            )
            datasets['train'] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f'train[{data_args.validation_split_percentage}%:]',
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files['train'] = data_args.train_file
        if data_args.validation_file is not None:
            data_files['validation'] = data_args.validation_file
        extension = data_args.train_file.split('.')[-1]
        if extension == "txt":
            extension = 'text'
        datasets = load_dataset(extension, data_files=data_files)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        'cache_dir': model_args.cache_dir,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.')
        if model_args.config_overrides is not None:
            logger.info(f'Overriding config: {model_args.config_overrides}')
            config.update_from_string(model_args.config_overrides)
            logger.info(f'New config: {config}')

    tokenizer_kwargs = {
        'cache_dir': model_args.cache_dir,
        'use_fast': model_args.use_fast_tokenizer,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
            'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool('.ckpt' in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info('Training new model from scratch')
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets['train'].column_names
    else:
        column_names = datasets['validation'].column_names
    text_column_name = 'text' if 'text' in column_names else column_names[0]

    padding = 'max_length' if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples['text'] = [line for line in examples['text'] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples['text'], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets['train'] = add_chinese_references(tokenized_datasets['train'], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets['validation'] = add_chinese_references(
            tokenized_datasets['validation'], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets['train'] if training_args.do_train else None,
        eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, 'train_results.txt')
        if trainer.is_world_process_zero():
            with open(output_train_file, 'w') as writer:
                logger.info('***** Train results *****')
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f'  {key} = {value}')
                    writer.write(f'{key} = {value}\n')

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, 'trainer_state.json'))

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output['eval_loss'])
        results['perplexity'] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, 'eval_results_mlm_wwm.txt')
        if trainer.is_world_process_zero():
            with open(output_eval_file, 'w') as writer:
                logger.info('***** Eval results *****')
                for key, value in sorted(results.items()):
                    logger.info(f'  {key} = {value}')
                    writer.write(f'{key} = {value}\n')

    return results


def _mp_fn(index):
    # Entry point for xla_spawn (TPU multiprocessing); the index is unused.
    main()


if __name__ == "__main__":
    main()
# --- (corrupted table-separator artifact removed; unrelated snippet boundary) ---
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def snake_case():
    """Split raw DPR training data into an evaluation-set file (questions) and a
    gold-data file (tab-separated positive context titles) for RAG evaluation.
    """
    parser = argparse.ArgumentParser()
    # Required parameters
    # Fix: `type=` previously received an undefined name instead of `str`.
    parser.add_argument(
        '--src_path',
        type=str,
        default='biencoder-nq-dev.json',
        help='Path to raw DPR training data',
    )
    parser.add_argument(
        '--evaluation_set',
        type=str,
        help='where to store parsed evaluation_set file',
    )
    parser.add_argument(
        '--gold_data_path',
        type=str,
        help='where to store parsed gold_data_path file',
    )
    args = parser.parse_args()

    with open(args.src_path, 'r') as src_file, open(args.evaluation_set, 'w') as eval_file, open(
        args.gold_data_path, 'w'
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record['question']
            contexts = [context['title'] for context in dpr_record['positive_ctxs']]
            eval_file.write(question + '\n')
            gold_file.write('\t'.join(contexts) + '\n')
if __name__ == "__main__":
    # Fix: `main` is not defined in this module — the entry point above is `snake_case`.
    snake_case()
# --- (corrupted table-separator artifact removed; unrelated snippet boundary) ---
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Fix: the logger and the archive map were both bound to one name, so the
# second assignment silently discarded the logger.
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/swinv2-tiny-patch4-window8-256': (
        'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
    ),
}
class SCREAMING_SNAKE_CASE__(PretrainedConfig):
    """Configuration for a Swin Transformer V2 model.

    NOTE(review): the base class was an undefined name; `PretrainedConfig`
    (imported at the top of this file) is restored. All `__init__` parameters
    had been collapsed to one duplicated name (a SyntaxError); they are
    restored to match the attribute names assigned in the body.
    """

    model_type = """swinv2"""

    # Map standard config attribute names onto this model's field names.
    attribute_map = {
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
# --- (corrupted table-separator artifact removed; unrelated snippet boundary) ---
'''simple docstring'''
def _snake_case ( _SCREAMING_SNAKE_CASE : list[list[int]] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : set ) -> int:
"""simple docstring"""
lowerCAmelCase, lowerCAmelCase = len(_SCREAMING_SNAKE_CASE ), len(grid[0] )
if (
min(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
lowerCAmelCase = 0
count += depth_first_search(_SCREAMING_SNAKE_CASE , row + 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
count += depth_first_search(_SCREAMING_SNAKE_CASE , row - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
count += depth_first_search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , col + 1 , _SCREAMING_SNAKE_CASE )
count += depth_first_search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , col - 1 , _SCREAMING_SNAKE_CASE )
visit.remove((row, col) )
return count
if __name__ == "__main__":
    # Run doctests when executed directly.
    # Fix: the testmod() line carried trailing table-residue junk that made
    # the module a syntax error.
    import doctest

    doctest.testmod()
def __UpperCamelCase ( A = 50000000 ):
UpperCamelCase__ = set()
UpperCamelCase__ = int((limit - 24) ** (1 / 2) )
UpperCamelCase__ = set(range(3 , prime_square_limit + 1 , 2 ) )
primes.add(2 )
for p in range(3 , prime_square_limit + 1 , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , prime_square_limit + 1 , A ) ) )
for primea in primes:
UpperCamelCase__ = primea * primea
for primea in primes:
UpperCamelCase__ = primea * primea * primea
if square + cube >= limit - 16:
break
for primea in primes:
UpperCamelCase__ = primea * primea * primea * primea
UpperCamelCase__ = square + cube + tetr
if total >= limit:
break
ret.add(A )
return len(A )
if __name__ == "__main__":
    # Fix: `solution` is not defined in this module; the solver above is `__UpperCamelCase`.
    print(f"""{__UpperCamelCase() = }""")
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def __UpperCamelCase ( A ):
for param in module.parameters():
UpperCamelCase__ = False
def __UpperCamelCase ( ):
UpperCamelCase__ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
UpperCamelCase__ = '''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def __UpperCamelCase(A):
    """Display the image `A` with matplotlib, hiding both axes.

    Fix: the imshow handle was bound to a throwaway local while the code read
    the undefined name `fig`, and the image itself was passed as the axis
    visibility flag instead of False.
    """
    fig = plt.imshow(A)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()
def __UpperCamelCase ( ):
UpperCamelCase__ = datetime.now()
UpperCamelCase__ = current_time.strftime('''%H:%M:%S''' )
return timestamp
# --- (corrupted table-separator artifact removed; unrelated snippet boundary) ---
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure: maps submodule name -> public symbols it provides.
# Fix: every assignment went to one repeatedly-rebound junk name while
# `_LazyModule` below reads `_import_structure`, which was never defined.
_import_structure = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""tokenization_xlnet"""] = ["""XLNetTokenizer"""]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""tokenization_xlnet_fast"""] = ["""XLNetTokenizerFast"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_xlnet"""] = [
        """XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """XLNetForMultipleChoice""",
        """XLNetForQuestionAnswering""",
        """XLNetForQuestionAnsweringSimple""",
        """XLNetForSequenceClassification""",
        """XLNetForTokenClassification""",
        """XLNetLMHeadModel""",
        """XLNetModel""",
        """XLNetPreTrainedModel""",
        """load_tf_weights_in_xlnet""",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_tf_xlnet"""] = [
        """TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFXLNetForMultipleChoice""",
        """TFXLNetForQuestionAnsweringSimple""",
        """TFXLNetForSequenceClassification""",
        """TFXLNetForTokenClassification""",
        """TFXLNetLMHeadModel""",
        """TFXLNetMainLayer""",
        """TFXLNetModel""",
        """TFXLNetPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet import XLNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet_fast import XLNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlnet import (
            XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLNetForMultipleChoice,
            XLNetForQuestionAnswering,
            XLNetForQuestionAnsweringSimple,
            XLNetForSequenceClassification,
            XLNetForTokenClassification,
            XLNetLMHeadModel,
            XLNetModel,
            XLNetPreTrainedModel,
            load_tf_weights_in_xlnet,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlnet import (
            TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLNetForMultipleChoice,
            TFXLNetForQuestionAnsweringSimple,
            TFXLNetForSequenceClassification,
            TFXLNetForTokenClassification,
            TFXLNetLMHeadModel,
            TFXLNetMainLayer,
            TFXLNetModel,
            TFXLNetPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
# --- (corrupted table-separator artifact removed; unrelated snippet boundary) ---
'''simple docstring'''
def _UpperCamelCase(n, allow_probable=False) -> bool:
    """Miller-Rabin primality test, deterministic for n < 3.32e24.

    Args:
        n: integer to test for primality.
        allow_probable: allow a probabilistic answer for n above the largest
            deterministic bound; ``True`` then means "probable prime".

    Returns:
        True if n is (probably) prime, False if composite.

    Raises:
        ValueError: if n exceeds the deterministic bound and
            ``allow_probable`` is False.
    """
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            'Warning: upper bound of deterministic test is exceeded. '
            'Pass allow_probable=True to allow probabilistic test. '
            'A return value of True indicates a probable prime.'
        )
    # array bounds provided by analysis: n below bounds[i] is decided
    # deterministically by the witnesses primes[: i + 1]
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    else:
        # n exceeds every deterministic bound (only reachable with
        # allow_probable=True): fall back to the full witness set.
        # Previously ``plist`` was left unbound here and the call crashed.
        plist = primes
    # break up n - 1 into a power of 2 (s) and a remaining odd component d,
    # i.e. solve d * 2 ** s == n - 1
    d, s = n - 1, 0
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True


# Conventional public name; the test function below calls ``miller_rabin``.
miller_rabin = _UpperCamelCase
# Capture the primality test defined above before this module-level name is
# rebound to the self-test function below (fixes the NameError on
# ``miller_rabin`` inside the test body).
miller_rabin = _UpperCamelCase


def _UpperCamelCase() -> None:
    """Exercise miller_rabin on composites/primes straddling each bound."""
    assert not miller_rabin(561)
    assert miller_rabin(563)
    # 2047
    assert not miller_rabin(838_201)
    assert miller_rabin(838_207)
    # 1_373_653
    assert not miller_rabin(17_316_001)
    assert miller_rabin(17_316_017)
    # 25_326_001
    assert not miller_rabin(3_078_386_641)
    assert miller_rabin(3_078_386_653)
    # 3_215_031_751
    assert not miller_rabin(1_713_045_574_801)
    assert miller_rabin(1_713_045_574_819)
    # 2_152_302_898_747
    assert not miller_rabin(2_779_799_728_307)
    assert miller_rabin(2_779_799_728_327)
    # 3_474_749_660_383
    assert not miller_rabin(113_850_023_909_441)
    assert miller_rabin(113_850_023_909_527)
    # 341_550_071_728_321
    assert not miller_rabin(1_275_041_018_848_804_351)
    assert miller_rabin(1_275_041_018_848_804_391)
    # 3_825_123_056_546_413_051
    assert not miller_rabin(79_666_464_458_507_787_791_867)
    assert miller_rabin(79_666_464_458_507_787_791_951)
    # 318_665_857_834_031_151_167_461
    assert not miller_rabin(552_840_677_446_647_897_660_333)
    assert miller_rabin(552_840_677_446_647_897_660_359)
    # 3_317_044_064_679_887_385_961_981
    # upper limit for probabilistic test


# ``_UpperCamelCase`` now names the self-test; expose the conventional name
# used by the __main__ guard (fixes the NameError on ``test_miller_rabin``).
test_miller_rabin = _UpperCamelCase

if __name__ == "__main__":
    test_miller_rabin()
| 42 | 0 |
'''simple docstring'''
def __lowerCamelCase() -> int:
    """Project Euler 19: count Sundays falling on the first of a month
    during the twentieth century (1 Jan 1901 to 31 Dec 2000).

    Steps week-by-week from the first Sunday of the century, maintaining
    day-of-month/month/year by hand with explicit leap-year handling.

    Returns:
        Number of first-of-the-month Sundays.
    """
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6  # 1 Jan 1901 was a Tuesday, so 6 Jan 1901 was the first Sunday
    month = 1
    year = 1_901
    sundays = 0
    while year < 2_001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            # leap year: February has 29 days
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2_001 and day == 1:
            sundays += 1
    return sundays


# Conventional name used by the __main__ guard (was an undefined reference).
solution = __lowerCamelCase

if __name__ == "__main__":
    print(solution())
| 324 |
'''simple docstring'''
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def __lowerCamelCase(class_info_file, repo_path="shi-labs/oneformer_demo"):
    """Build the class metadata dict used by the OneFormer tests.

    Downloads ``class_info_file`` — a JSON mapping of class id to
    ``{"name": ..., "isthing": ...}`` — from the ``repo_path`` dataset repo
    and flattens it.

    Returns:
        dict mapping each class id (str) to its name, plus the aggregate
        keys ``"thing_ids"`` (list[int]) and ``"class_names"`` (list[str]).
    """
    with open(hf_hub_download(repo_path, class_info_file, repo_type='dataset'), 'r') as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info['name']
        class_names.append(info['name'])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata


# Name used by the tester class below; ``__lowerCamelCase`` itself is not
# reachable from inside a class body because of private-name mangling.
prepare_metadata = __lowerCamelCase
class SCREAMING_SNAKE_CASE(unittest.TestCase):
    """Configuration holder that fabricates inputs and expected values for
    the OneFormer image-processor tests below."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_resize=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        num_labels=10,
        do_reduce_labels=False,
        ignore_index=255,
        repo_path="shi-labs/oneformer_demo",
        class_info_file="ade20k_panoptic.json",
        num_text=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {'shortest_edge': 32, 'longest_edge': 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path
        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index

    def prepare_image_processor_dict(self):
        """Kwargs to construct the OneFormerImageProcessor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "num_labels": self.num_labels,
            "do_reduce_labels": self.do_reduce_labels,
            "ignore_index": self.ignore_index,
            "class_info_file": self.class_info_file,
            "metadata": self.metadata,
            "num_text": self.num_text,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Expected (height, width) after shortest-edge resizing; for a
        batch, the per-image maxima (the padding target)."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width

    def get_fake_oneformer_outputs(self):
        """Random model output matching the tester's batch/query/class sizes."""
        return OneFormerForUniversalSegmentationOutput(
            # +1 for null class
            class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)),
            masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)),
        )


# Conventional name referenced by the test class's setUp below.
OneFormerImageProcessorTester = SCREAMING_SNAKE_CASE
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for OneFormerImageProcessor: PIL/numpy/torch call paths,
    segmentation-map preparation, RLE encoding and post-processing."""

    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class

    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processing_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, 'image_mean'))
        self.assertTrue(hasattr(image_processor, 'image_std'))
        self.assertTrue(hasattr(image_processor, 'do_normalize'))
        self.assertTrue(hasattr(image_processor, 'do_resize'))
        self.assertTrue(hasattr(image_processor, 'size'))
        self.assertTrue(hasattr(image_processor, 'ignore_index'))
        self.assertTrue(hasattr(image_processor, 'class_info_file'))
        self.assertTrue(hasattr(image_processor, 'num_text'))
        self.assertTrue(hasattr(image_processor, 'repo_path'))
        self.assertTrue(hasattr(image_processor, 'metadata'))
        self.assertTrue(hasattr(image_processor, 'do_reduce_labels'))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ['semantic'], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ['semantic'] * len(image_inputs), return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    def test_call_numpy(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ['semantic'], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ['semantic'] * len(image_inputs), return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    def test_call_pytorch(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ['semantic'], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ['semantic'] * len(image_inputs), return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    def comm_get_image_processor_inputs(self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"):
        """Shared input builder: images plus (optionally) random
        segmentation maps as numpy arrays or PIL images."""
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels)) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation) for annotation in annotations]

        inputs = image_processor(
            image_inputs, ['semantic'] * len(image_inputs), annotations, return_tensors='pt', instance_id_to_semantic_id=instance_id_to_semantic_id, pad_and_return_pixel_mask=True, )
        return inputs

    def test_init_without_params(self):
        pass

    def test_call_with_segmentation_maps(self):
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type)

            mask_labels = inputs['mask_labels']
            class_labels = inputs['class_labels']
            pixel_values = inputs['pixel_values']
            text_inputs = inputs['text_inputs']

            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
                self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
                self.assertEqual(len(text_input), self.image_processing_tester.num_text)

        common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type='pil')
        common(is_instance_map=True, segmentation_type='pil')

    def test_binary_mask_to_rle(self):
        fake_binary_mask = np.zeros((20, 50))
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1

        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)

    def test_post_process_semantic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file='ade20k_panoptic.json', num_text=self.image_processing_tester.num_text, repo_path='shi-labs/oneformer_demo', )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(outputs)

        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape, (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ), )

        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)
        self.assertEqual(segmentation[0].shape, target_sizes[0])

    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file='ade20k_panoptic.json', num_text=self.image_processing_tester.num_text, repo_path='shi-labs/oneformer_demo', )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue('segmentation' in el)
            self.assertTrue('segments_info' in el)
            self.assertEqual(type(el['segments_info']), list)
            self.assertEqual(
                el['segmentation'].shape, (self.image_processing_tester.height, self.image_processing_tester.width))

    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file='ade20k_panoptic.json', num_text=self.image_processing_tester.num_text, repo_path='shi-labs/oneformer_demo', )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue('segmentation' in el)
            self.assertTrue('segments_info' in el)
            self.assertEqual(type(el['segments_info']), list)
            self.assertEqual(
                el['segmentation'].shape, (self.image_processing_tester.height, self.image_processing_tester.width))
| 324 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
A = logging.get_logger(__name__)
class __snake_case(LayoutLMvaImageProcessor):
    """Deprecated shim kept for backward compatibility: identical to
    LayoutLMvaImageProcessor, but warns on construction."""

    def __init__(self, *args, **kwargs):
        """Emit a deprecation warning, then defer to the image processor."""
        # NOTE(review): warning category restored as FutureWarning, the
        # standard category for transformers deprecations — confirm.
        warnings.warn(
            'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use LayoutLMv2ImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs)
| 320 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Module-level constants referenced by the tokenizer class below. (They
# were all bound to a single name ``A``, leaving VOCAB_FILES_NAMES etc.
# undefined at class-creation time.)
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'camembert-base': 512,
}

SPIECE_UNDERLINE = '▁'
class __snake_case(PreTrainedTokenizer):
    """CamemBERT tokenizer backed by SentencePiece (BPE), with fairseq's
    special-token layout prepended to the sentencepiece vocabulary."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        sp_model_kwargs=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word: include the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>
        self.fairseq_tokens_to_ids = {'<s>NOTUSED': 0, '<pad>': 1, '</s>NOTUSED': 2, '<unk>': 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids['<mask>'] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """``<s> A </s>`` for one sequence, ``<s> A </s></s> B </s>`` for a pair."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """All zeros: CamemBERT does not use token type ids."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (int) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Joins sentencepiece sub-tokens back into a single string."""
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        # The C++ SentencePiece processor is not picklable: drop it and
        # reload from the vocab file in __setstate__.
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy (or serialize) the sentencepiece model into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 320 | 1 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
snake_case = logging.get_logger("""transformers.models.speecht5""")
def load_weights(checkpoint, hf_model, config):
    """Copy HiFi-GAN generator weights from the original checkpoint dict
    into the SpeechTaHifiGan model.

    Args:
        checkpoint: state dict of the original generator.
        hf_model: SpeechTaHifiGan instance to populate.
        config: SpeechTaHifiGanConfig describing upsample/resblock layout.
    """
    # NOTE(review): target attribute names (conv_pre / upsampler /
    # resblocks / conv_post) follow the SpeechTaHifiGan module layout —
    # confirm against modeling_speecht5.
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f'''upsamples.{i}.1.weight_g''']
        hf_model.upsampler[i].weight_v.data = checkpoint[f'''upsamples.{i}.1.weight_v''']
        hf_model.upsampler[i].bias.data = checkpoint[f'''upsamples.{i}.1.bias''']

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_g''']
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_v''']
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f'''blocks.{i}.convs1.{j}.1.bias''']
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_g''']
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_v''']
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f'''blocks.{i}.convs2.{j}.1.bias''']

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None, ):
    """Convert an original SpeechT5 HiFi-GAN checkpoint into a HF SpeechTaHifiGan.

    Args:
        checkpoint_path: original torch checkpoint (expects ["model"]["generator"]).
        stats_path: .npy file whose rows 0 and 1 hold the mel mean/scale stats.
        pytorch_dump_folder_path: output directory for ``save_pretrained``.
        config_path: optional HF config.json to build the model from.
        repo_id: optional hub repo id to push the converted model to.
    """
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechTaHifiGanConfig()

    model = SpeechTaHifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    # CLI entry point: parse the conversion arguments and run the converter.
    parser = argparse.ArgumentParser()
    parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
    parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    parser.add_argument(
        """--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
    )
    parser.add_argument(
        """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
    )

    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
) | 709 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
snake_case = None
snake_case = logging.get_logger(__name__)
snake_case = {"""vocab_file""": """sentencepiece.model""", """tokenizer_file""": """tokenizer.json"""}
snake_case = {
"""vocab_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""",
},
"""tokenizer_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/tokenizer.json""",
},
}
snake_case = {
"""google/rembert""": 2_56,
}
snake_case = """▁"""
class lowerCAmelCase(PreTrainedTokenizerFast):
    """Fast RemBERT tokenizer (tokenizers-backed), optionally carrying the
    slow sentencepiece vocab so it can be re-exported."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word: include the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # Only possible to export the slow tokenizer if the spm file exists.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """``[CLS] A [SEP]`` for one sequence, ``[CLS] A [SEP] B [SEP]`` for a pair."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model.")
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """0s for the first sequence (with specials), 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the slow sentencepiece vocab into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 568 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    """Builds tiny DPR configs and random inputs for the unit tests below.

    Reconstructed: the garbled original gave every __init__ parameter the same
    name (a SyntaxError) and every method the same name (so later defs
    overwrote earlier ones), while the test class below calls the real method
    names restored here.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, input_mask, *labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        # DPR wraps a BERT encoder config and adds a projection dimension.
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite tests for the TF DPR models.

    Reconstructed: the garbled original bound every class attribute to the
    same name `UpperCamelCase` (each assignment clobbering the previous one)
    and named every method `__lowerCAmelCase`, which also hid them from
    unittest discovery (test methods must start with `test_`).
    """

    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    """Slow integration test against a pretrained DPR question encoder."""

    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
| 698 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger and pretrained-config map. In the garbled original both
# were bound to the same name, so the logger was immediately overwritten.
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class NllbMoeConfig(PretrainedConfig):
    """Configuration for NLLB-MoE (mixture-of-experts seq2seq) models.

    Reconstructed: the garbled original named every __init__ parameter
    `lowerCamelCase_` (duplicate parameter names are a SyntaxError) while the
    body read the real attribute names. Parameter names below are recovered
    from those body references; defaults follow the original signature order.
    """

    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 698 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import machinery for the graphormer subpackage.
# Fixes in the garbled original: the import-structure dict and the
# modeling-names list were both bound to `a__` (the list must instead be a
# dict entry), `_LazyModule` was called with the undefined name
# `_import_structure`, and the `sys.modules[__name__]` assignment was lost.
_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_graphormer import (
            GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            GraphormerForGraphClassification,
            GraphormerModel,
            GraphormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 703 |
'''simple docstring'''
import inspect
import unittest
class DependencyTester(unittest.TestCase):
    """Checks that diffusers imports and that every dummy-object backend is
    listed in the dependency table.

    Fixes in the garbled original: `inspect.getmembers` was called on the
    undefined name `__A` (must be the `diffusers` module), and both test
    methods shared one name so the first was overwritten and neither was
    discovered by unittest.
    """

    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)

        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    # Backend identifiers use '_' while pip package names use '-'.
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
| 434 | 0 |
class FlowNetwork:
    """A capacitated flow network over an adjacency-matrix graph.

    Reconstructed: all four classes in this module were named `_a` (each
    shadowing the previous) and all methods `__A`, while `__init__` and the
    driver code call the real names restored here. Also fixes
    `if sources is int:` — an identity check against the `int` type object
    that is always False — to `isinstance`.
    """

    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.verticies_count = len(graph)
        self.maximum_flow_algorithm = None

    def _normalize_graph(self, sources, sinks):
        """Accept scalar or list sources/sinks; add a super-source/super-sink
        when there is more than one of either."""
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)


class FlowNetworkAlgorithmExecutor:
    """Base executor: caches the network's data and runs `_algorithm` once."""

    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    def _algorithm(self):
        pass


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    """Executor that produces a maximum-flow value."""

    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow


class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    """Push–relabel maximum-flow with the relabel-to-front selection rule."""

    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    # Demo: compute the maximum flow of a small 4-vertex network.
    # (The garbled original bound every local to `__a` and then referenced the
    # real names `entrances`, `exits`, `graph`, `flow_network`, `maximum_flow`.)
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()

    print(f"maximum flow is {maximum_flow}")
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import machinery for the timesformer subpackage.
# Fixes in the garbled original: the modeling-names list overwrote the
# import-structure dict instead of being added as a dict entry, `_LazyModule`
# was called with the undefined name `_import_structure`, and the
# `sys.modules[__name__]` assignment was lost.
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 502 | 0 |
def UpperCAmelCase ( lowercase , lowercase ):
"""simple docstring"""
__lowercase = ''''''
for i in table:
res += inp[i - 1]
return res
def UpperCAmelCase ( lowercase ):
"""simple docstring"""
return data[1:] + data[0]
def UpperCAmelCase ( lowercase , lowercase ):
"""simple docstring"""
__lowercase = ''''''
for i in range(len(lowercase ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def UpperCAmelCase ( lowercase , lowercase ):
"""simple docstring"""
__lowercase = int('''0b''' + data[0] + data[-1] , 2 )
__lowercase = int('''0b''' + data[1:3] , 2 )
return bin(s[row][col] )[2:]
def UpperCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase ):
"""simple docstring"""
__lowercase = message[:4]
__lowercase = message[4:]
__lowercase = apply_table(lowercase , lowercase )
__lowercase = xor(lowercase , lowercase )
__lowercase = apply_sbox(lowercase , temp[:4] ) # noqa: E741
__lowercase = apply_sbox(lowercase , temp[4:] )
__lowercase = '''0''' * (2 - len(lowercase )) + l # noqa: E741
__lowercase = '''0''' * (2 - len(lowercase )) + r
__lowercase = apply_table(l + r , lowercase )
__lowercase = xor(lowercase , lowercase )
return temp + right
if __name__ == "__main__":
    # Interactive S-DES demo: read a 10-bit key and an 8-bit message, print
    # the cipher text and the recovered plain text. (The garbled original
    # bound every local to `__a` and then referenced the real names.)
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]  # swap halves between rounds
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption (round keys in reverse order)
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decypting is:", PT)
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
__a : Dict = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    """Image processor that rescales pixel values and symmetrically pads
    images to a multiple of `pad_size` (super-resolution style preprocessing).

    Reconstructed: the garbled original named every __init__/preprocess
    parameter identically (a SyntaxError) and every method
    `_SCREAMING_SNAKE_CASE` (so later defs overwrote earlier ones), while
    `preprocess` called the real `self.rescale`/`self.pad` restored here.
    """

    model_input_names = ["pixel_values"]

    def __init__(self, do_rescale=True, rescale_factor=1 / 255, do_pad=True, pad_size=8, **kwargs) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by `scale` (delegates to the module helper)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image, size, data_format=None):
        """Symmetrically pad bottom/right so both dimensions become a
        multiple of `size`.

        NOTE(review): when a dimension is already a multiple of `size`, a full
        extra `size` rows/cols are added — this mirrors the visible arithmetic;
        confirm it is the intended behavior before changing it.
        """
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images,
        do_rescale=None,
        rescale_factor=None,
        do_pad=None,
        pad_size=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Rescale and/or pad a batch of images and wrap them in a BatchFeature."""
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
from typing import List
import numpy as np
def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ ):
UpperCamelCase__ : Union[str, Any] = {key: len(UpperCamelCase__ ) for key, value in gen_kwargs.items() if isinstance(UpperCamelCase__ , UpperCamelCase__ )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
'''Sharding is ambiguous for this dataset: '''
+ '''we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'''
+ '''\n'''.join(f'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() )
+ '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '''
+ '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'''
) )
UpperCamelCase__ : List[str] = max(lists_lengths.values() , default=0 )
return max(1 , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ , UpperCamelCase__ ):
UpperCamelCase__ : str = []
for group_idx in range(UpperCamelCase__ ):
UpperCamelCase__ : Union[str, Any] = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
UpperCamelCase__ : Union[str, Any] = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
UpperCamelCase__ : Tuple = range(UpperCamelCase__ , start + num_shards_to_add )
shards_indices_per_group.append(UpperCamelCase__ )
return shards_indices_per_group
def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ , UpperCamelCase__ ):
UpperCamelCase__ : Union[str, Any] = _number_of_shards_in_gen_kwargs(UpperCamelCase__ )
if num_shards == 1:
return [dict(UpperCamelCase__ )]
else:
UpperCamelCase__ : str = _distribute_shards(num_shards=UpperCamelCase__ , max_num_jobs=UpperCamelCase__ )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(UpperCamelCase__ , UpperCamelCase__ )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(UpperCamelCase__ ) )
]
def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ ):
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , UpperCamelCase__ )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ , UpperCamelCase__ ):
UpperCamelCase__ : Dict = {len(UpperCamelCase__ ) for value in gen_kwargs.values() if isinstance(UpperCamelCase__ , UpperCamelCase__ )}
UpperCamelCase__ : Union[str, Any] = {}
for size in list_sizes:
UpperCamelCase__ : Dict = list(range(UpperCamelCase__ ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
UpperCamelCase__ : str = dict(UpperCamelCase__ )
for key, value in shuffled_kwargs.items():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
UpperCamelCase__ : str = [value[i] for i in indices_per_size[len(UpperCamelCase__ )]]
return shuffled_kwargs
| 285 |
def knapsack(weights, values, number_of_items, max_weight, index):
    """Recursive 0/1 knapsack: best value achievable from `index` onward with
    `max_weight` capacity remaining.

    Reconstructed: the garbled original named all five parameters identically
    (a SyntaxError) while recursing on the real name `knapsack`.

    >>> knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)
    13
    """
    if index == number_of_items:
        return 0
    # Option 1: skip the current item.
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    # Option 2: take the current item if it fits.
    ans2 = 0
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 285 | 1 |
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Return the first non-negative int found among `env_keys`, else `default`."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


# Truth-value sets matching distutils.util.strtobool, which was removed from
# the standard library in Python 3.12 — inlined here instead.
_TRUE_VALUES = {"y", "yes", "t", "true", "on", "1"}
_FALSE_VALUES = {"n", "no", "f", "false", "off", "0"}


def _str_to_bool(value):
    """strtobool-compatible: 1 for truthy strings, 0 for falsy, else ValueError."""
    value = value.lower()
    if value in _TRUE_VALUES:
        return 1
    if value in _FALSE_VALUES:
        return 0
    raise ValueError(f"invalid truth value {value!r}")


def parse_flag_from_env(key, default=False):
    """Read env var `key` as a boolean flag, falling back to `default`."""
    value = os.environ.get(key, str(default))
    return _str_to_bool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    """Read env var `key` as a raw string choice, falling back to `default`."""
    value = os.environ.get(key, str(default))
    return value
| 710 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    """Builds a tiny FocalNet config plus random inputs and runs shape checks
    on behalf of the unittest classes below (which call it via ``self.model_tester``)."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) with random tensors."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        # NOTE(review): `path_norm` (not `patch_norm`) reproduces the original
        # keyword; PretrainedConfig tolerates unknown kwargs — confirm intended.
        return FocalNetConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Forward pass through the bare model; check last_hidden_state shape."""
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        """Check feature maps/channels of the backbone, with and without out_features."""
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        """Check reconstruction shape for RGB and greyscale inputs."""
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Check classification logits shape for RGB and greyscale inputs."""
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape ModelTesterMixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Standard model-level tests for FocalNet (driven by FocalNetModelTester)."""

    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Common properties are exercised by the ConfigTester calls above.
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="FocalNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        # The backbone (last class) has no input/output embeddings, so skip it.
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        """Run one model and validate hidden_states / reshaped_hidden_states shapes."""
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        # Image dims are padded up to the next multiple of the patch size.
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the released microsoft/focalnet-tiny checkpoint."""

    @cached_property
    def default_image_processor(self):
        # TODO update organization
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        # Bug fix: assertTrue(x, 281) treated 281 as the failure message and
        # always passed for any truthy x; assertEqual performs the comparison.
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    """Backbone-specific tests for FocalNet (logic lives in BackboneTesterMixin)."""

    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
| 375 | 0 |
import string
def atbash_slow(sequence: str) -> str:
    """Encode/decode *sequence* with the Atbash cipher via ord()/chr() arithmetic.

    Uppercase maps within A-Z (chr(155 - code)), lowercase within a-z
    (chr(219 - code)); every other character passes through unchanged.
    Atbash is its own inverse.
    """
    output = ""
    for char in sequence:
        extract = ord(char)
        if 65 <= extract <= 90:
            # ASCII uppercase: 'A' (65) <-> 'Z' (90), since 65 + 90 == 155.
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            # ASCII lowercase: 'a' (97) <-> 'z' (122), since 97 + 122 == 219.
            output += chr(219 - extract)
        else:
            output += char
    return output
def atbash(sequence: str) -> str:
    """Encode/decode *sequence* with the Atbash cipher using a lookup table.

    Each ASCII letter is replaced by its mirror in the alphabet
    (a<->z, b<->y, ..., case preserved); non-letters pass through.
    """
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )
def benchmark() -> None:
    """Print timing comparison of atbash() and atbash_slow() over string.printable."""
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")
if __name__ == "__main__":
    # Demo: cipher a few sample strings, then run the timing benchmark.
    # NOTE(review): requires `atbash` and `benchmark` to be defined under those
    # names at module level — verify the definitions above match.
    for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
        print(f'''{example} encrypted in atbash: {atbash(example)}''')
    benchmark()
| 318 |
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    """Calculate real power (W) from apparent power (VA) and power factor.

    :raises ValueError: if power_factor is not a number in [-1, 1]
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('''power_factor must be a valid float value between -1 and 1.''')
    return apparent_power * power_factor
def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Calculate reactive power (VAR) from apparent power (VA) and power factor.

    Uses the power triangle: Q = S * sqrt(1 - pf^2).

    :raises ValueError: if power_factor is not a number in [-1, 1]
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('''power_factor must be a valid float value between -1 and 1.''')
    return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 175 | 0 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
UpperCamelCase : str = logging.get_logger(__name__)
UpperCamelCase : str = {"vocab_file": "spiece.model"}
UpperCamelCase : Any = {
"vocab_file": {
"TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
}
}
class lowerCamelCase__(PreTrainedTokenizer):
    """CPM tokenizer: a SentencePiece tokenizer (XLNet-style) that additionally
    relies on `jieba` for Chinese word segmentation and remaps space/newline
    to \u2582/\u2583 sentinel characters.

    NOTE(review): the original base class name was mangled; PreTrainedTokenizer
    (imported above) is the only plausible base — confirm against upstream.
    """

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behaves like a normal word: include the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        # XLNet-style segment id used for padding.
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                'You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '
                'See https://pypi.org/project/jieba/ for installation.'
            )
        self.jieba = jieba
        self.translator = str.maketrans(' \n', '\u2582\u2583')

    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        """Return token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        """Normalize raw text (whitespace, quotes, accents, case) before SentencePiece."""
        if self.remove_space:
            outputs = ' '.join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace('``', '"').replace('\'\'', '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize('NFKD', outputs)
            outputs = ''.join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text):
        """Tokenize with SentencePiece, re-splitting pieces like "9," into digits + comma."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(',') and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ''))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Join pieces and restore plain spaces from SPIECE_UNDERLINE markers."""
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None):
        """XLNet format: ``A <sep> <cls>`` or ``A <sep> B <sep> <cls>``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_b + sep + cls

    def get_special_tokens_mask(
        self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        """Return a mask with 1 at special-token positions, 0 at sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_b, already_has_special_tokens=True
            )

        if token_ids_b is not None:
            return ([0] * len(token_ids_a)) + [1] + ([0] * len(token_ids_b)) + [1, 1]
        return ([0] * len(token_ids_a)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None):
        """Segment ids: 0 for A (+sep), 1 for B (+sep), 2 for the final <cls>."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_b is None:
            return len(token_ids_a + sep) * [0] + cls_segment_id
        return len(token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Copy (or serialize) the SentencePiece model into *save_directory*."""
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        """Decode, then undo the CPM space/newline sentinel remapping."""
        text = super()._decode(*args, **kwargs)
        text = text.replace(' ', '').replace('\u2582', ' ').replace('\u2583', '\n')
        return text
| 91 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase : Dict = {
"configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Any = [
"MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegatronBertForCausalLM",
"MegatronBertForMaskedLM",
"MegatronBertForMultipleChoice",
"MegatronBertForNextSentencePrediction",
"MegatronBertForPreTraining",
"MegatronBertForQuestionAnswering",
"MegatronBertForSequenceClassification",
"MegatronBertForTokenClassification",
"MegatronBertModel",
"MegatronBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
UpperCamelCase : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 91 | 1 |
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup(params, i, prefix):
    """Returns the relative position bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention for layer *i*. Does not transpose.

    Each kernel is stored as (d_model, num_layers, n_heads, head_dim); the layer
    axis is selected and the head axes flattened into one matrix per projection.
    """
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    # The output projection flattens the *leading* (heads) axes instead.
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of layer *i*. Does not transpose.

    v1.1 checkpoints use a gated GeLU with two input projections (wi_0, wi_1);
    in that case ``wi`` is returned as a 2-tuple.
    """
    if split_mlp_wi:
        wi_a = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_a_ = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_a, wi_a_)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]

    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo
def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm scale parameter of layer *i*."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_tax_to_pytorch(variables, *, num_layers, is_encoder_only, scalable_attention=False):
    """Convert the flax parameters of a T5X checkpoint to a HF T5/UMT5-style
    parameter dict (numpy arrays keyed by PyTorch state-dict names)."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = tax_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = tax_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        # Non-scalable checkpoints share one relative-position bias (layer 0).
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = tax_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
                    old, i, "decoder"
                ).T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only):
    """Prepares a PyTorch state dict from the converted (numpy) parameters,
    tying embeddings / lm_head to the shared weights where they are absent."""
    # Copy arrays so torch does not alias the flax-owned memory.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces *model*'s weights with those of the T5X checkpoint at *tax_checkpoint_path*."""
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_tax_checkpoint_to_pytorch(
    tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False, scalable_attention=False
):
    """Convert a native T5X checkpoint into a saved PyTorch checkpoint.

    Args:
        tax_checkpoint_path: path to the T5X checkpoint.
        config_file: JSON config describing the model architecture.
        pytorch_dump_path: output directory for the PyTorch model.
        is_encoder_only: build an encoder-only model when True.
        scalable_attention: whether the model uses scaled attention (umt5).
    """
    config = MTaConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config)
    else:
        model = UMTaForConditionalGeneration(config)

    # Load weights from the T5X checkpoint.
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    parser.add_argument(
        "--scalable_attention",
        action="store_true",
        help="Whether the model uses scaled attention (umt5 model)",
        default=False,
    )
    args = parser.parse_args()
    # NOTE: argparse derives the attribute name from the option string, so
    # "--t5x_checkpoint_path" is exposed as args.t5x_checkpoint_path.
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
| 294 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy import structure: maps submodule name -> list of public names it defines.
_import_structure = {
    "configuration_owlvit": [
        "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "OwlViTConfig",
        "OwlViTOnnxConfig",
        "OwlViTTextConfig",
        "OwlViTVisionConfig",
    ],
    "processing_owlvit": ["OwlViTProcessor"],
}

# Vision-dependent objects are only registered when the vision extras are installed.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]

# Torch-dependent modeling objects.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_owlvit"] = [
        "OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OwlViTModel",
        "OwlViTPreTrainedModel",
        "OwlViTTextModel",
        "OwlViTVisionModel",
        "OwlViTForObjectDetection",
    ]

if TYPE_CHECKING:
    from .configuration_owlvit import (
        OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        OwlViTConfig,
        OwlViTOnnxConfig,
        OwlViTTextConfig,
        OwlViTVisionConfig,
    )
    from .processing_owlvit import OwlViTProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_owlvit import OwlViTFeatureExtractor
        from .image_processing_owlvit import OwlViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_owlvit import (
            OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OwlViTForObjectDetection,
            OwlViTModel,
            OwlViTPreTrainedModel,
            OwlViTTextModel,
            OwlViTVisionModel,
        )

else:
    import sys

    # Install the lazy proxy so heavy dependencies load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 444 | 0 |
import numpy as np
import skfuzzy as fuzz

if __name__ == "__main__":
    # Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Helper sets used by the bounded operations below.
    one = np.ones(75)
    zero = np.zeros((75,))

    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title("Young")
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title("Middle aged")
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title("union")
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title("intersection")
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title("complement_a")
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title("difference a/b")
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title("alg_sum")
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title("alg_product")
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title("bdd_sum")
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title("bdd_difference")
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
| 716 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger; used by the config class below (logger.info / logger.warning).
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class UpperCamelCase__(PretrainedConfig):
    """Configuration for an ESM model, optionally carrying an ESMFold folding head.

    When ``is_folding_model`` is True an ``EsmFoldConfig`` and a vocabulary list
    are attached; otherwise both are None.
    """

    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("""No esmfold_config supplied for folding model, using default values.""")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                # Accept a plain dict (e.g. deserialized from JSON).
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, """use_esm_attn_map""", False):
            raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""")

    def to_dict(self):
        """Serialize to a plain dict, recursing into the folding config if present."""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    """Configuration for the ESMFold folding head attached to an ESM model."""

    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        # Accept a ready TrunkConfig, a plain dict (e.g. from JSON), or nothing.
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        """Serialize to a plain dict, recursing into the trunk config."""
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    """Configuration of the ESMFold trunk (the stack of folding blocks)."""

    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        # Accept a ready StructureModuleConfig, a plain dict, or nothing.
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''')
        # State dims must be divisible by their head widths (the original
        # `x % x != 0` checks were tautologies and never fired).
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                """`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"""
                F''' {self.sequence_state_dim} and {self.sequence_head_width}.''')
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                """`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"""
                F''' {self.pairwise_state_dim} and {self.pairwise_head_width}.''')

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                """`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
                F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''')
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                """`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
                F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''')
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''')

        if self.dropout >= 0.4:
            raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''')

    def to_dict(self):
        """Serialize to a plain dict, recursing into the structure-module config."""
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    """Configuration of the ESMFold structure module (IPA head)."""

    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        """Serialize to a plain dict (no nested configs here)."""
        return asdict(self)
def get_default_vocab_list():
    """Return the default ESM-2 token vocabulary as a tuple.

    Order matters: token ids are the positions in this tuple.
    """
    return (
        "<cls>",
        "<pad>",
        "<eos>",
        "<unk>",
        "L",
        "A",
        "G",
        "V",
        "S",
        "E",
        "R",
        "T",
        "I",
        "D",
        "P",
        "K",
        "Q",
        "N",
        "F",
        "Y",
        "M",
        "H",
        "W",
        "C",
        "X",
        "B",
        "U",
        "Z",
        "O",
        ".",
        "-",
        "<null_1>",
        "<mask>",
    )
| 650 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
a_ : Tuple = TypeVar('''T''')
class __lowercase(Generic[T]):
    """Iterative segment tree supporting point updates and inclusive range queries.

    ``fnc`` must be an associative binary function (e.g. ``min``, ``max``,
    ``operator.add``).
    """

    def __init__(self, arr, fnc):
        any_type: Any | T = None
        self.N: int = len(arr)
        # st[N:] holds the leaves; st[1:N] holds internal nodes (st[0] unused).
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self):
        """Fill the internal nodes bottom-up from the leaves."""
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p, v):
        """Set ``arr[p] = v`` and refresh the ancestors of that leaf."""
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l, r):  # noqa: E741
        """Return fn over the inclusive range arr[l..r]."""
        # Shift both bounds into the leaf layer; walk inward, folding in any
        # node that lies fully inside the range.
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res


# Public alias matching how the tree is constructed in the __main__ block below.
SegmentTree = __lowercase
if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments():
        """Compare every possible range query against a brute-force reduce."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    # Apply point updates, then re-verify every range.
    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
| 594 |
'''simple docstring'''
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    """Pipeline tests for video classification (naming follows the pipeline test mixin)."""

    # Attribute name required by the pipeline test harness.
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)

            # Only the shape/types are checked here; scores are model-dependent.
            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )

        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass
| 173 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure: maps submodule name -> list of public names it defines.
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}

# Torch-dependent modeling objects are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_sew"] = [
        "SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SEWForCTC",
        "SEWForSequenceClassification",
        "SEWModel",
        "SEWPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_sew import (
            SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
            SEWForCTC,
            SEWForSequenceClassification,
            SEWModel,
            SEWPreTrainedModel,
        )

else:
    import sys

    # Install the lazy proxy so heavy dependencies load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 701 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure: maps submodule name -> list of public names it defines.
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}

# Fast tokenizer requires the `tokenizers` library.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]

# Torch-dependent modeling objects.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]

# TensorFlow-dependent modeling objects.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
    from .tokenization_convbert import ConvBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_convbert_fast import ConvBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convbert import (
            CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvBertForMaskedLM,
            ConvBertForMultipleChoice,
            ConvBertForQuestionAnswering,
            ConvBertForSequenceClassification,
            ConvBertForTokenClassification,
            ConvBertLayer,
            ConvBertModel,
            ConvBertPreTrainedModel,
            load_tf_weights_in_convbert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convbert import (
            TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFConvBertForMaskedLM,
            TFConvBertForMultipleChoice,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertLayer,
            TFConvBertModel,
            TFConvBertPreTrainedModel,
        )

else:
    import sys

    # Install the lazy proxy so heavy dependencies load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 687 | 0 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tests for the slow (byte-level BPE) and fast GPT-2 tokenizers."""

    tokenizer_class = GPTaTokenizer
    rust_tokenizer_class = GPTaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seqaseq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTaTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = GPTaTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                sa = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                pa = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # The slow-padding calls below must raise because no pad token is set.
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, sa, max_length=max_length, padding="max_length", )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, pa, max_length=max_length, padding="max_length", )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = GPTaTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        sa = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        pa = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_sa = tokenizer(sa, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_pa = tokenizer(pa, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_sa["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_sa["input_ids"][0])
        self.assertFalse(0 in out_sa["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_sa["input_ids"][1])
        self.assertTrue(0 in out_sa["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_pa["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_pa["input_ids"][0])
        self.assertFalse(0 in out_pa["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_pa["input_ids"][1])
        self.assertTrue(0 in out_pa["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = GPTaTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        sa = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_sa = tokenizer(sa)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_sa = tokenizer.batch_decode(out_sa.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa))

    # tokenizer has no padding token
    # NOTE(review): method body was an empty stub with a mangled name; restored
    # name is a best guess — confirm against the tokenizer test mixin hooks.
    def test_padding_different_model_input_name(self):
        pass

    def test_special_tokens_mask_input_pairs_and_bos_token(self):
        # TODO: change to self.get_tokenizers() when the fast version is implemented
        tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}"""):
                sequence_a = "Encode this."
                sequence_a = "Encode this."
                sequence_b = "This one too please."
                encoded_sequence = tokenizer.encode(sequence_a, add_special_tokens=False)
                encoded_sequence += tokenizer.encode(sequence_b, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_a, sequence_b, add_special_tokens=True, return_special_tokens_mask=True, )
                encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))

                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)
@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    """Regression tests for the OPT tokenizer (serialization and BOS handling)."""

    def test_serialize_deserialize_fast_opt(self):
        # More context:
        # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
        # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
        # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = "A photo of a cat"

        ids = tokenizer.encode(
            text,
        )
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("test_opt")

        tokenizer = AutoTokenizer.from_pretrained("./test_opt")
        ids = tokenizer.encode(
            text,
        )
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])

    def test_fast_slow_equivalence(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
        text = "A photo of a cat"

        ids = tokenizer.encode(
            text,
        )
        # Same as above
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])

    @unittest.skip("This test is failing because of a bug in the fast tokenizer")
    def test_users_can_modify_bos(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        tokenizer.bos_token = "bos"
        tokenizer.bos_token_id = tokenizer.get_vocab()["bos"]

        text = "A photo of a cat"
        ids = tokenizer.encode(
            text,
        )
        # We changed the bos token
        self.assertEqual(ids, [31957, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("./tok")
        tokenizer = AutoTokenizer.from_pretrained("./tok")
        self.assertTrue(tokenizer.is_fast)
        ids = tokenizer.encode(
            text,
        )
        self.assertEqual(ids, [31957, 250, 1345, 9, 10, 4758])
| 319 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
# Module-level logger for this tokenizer module.
logger = logging.get_logger(__name__)

# Fixes: these three constants were all assigned to the same mangled name
# (each overwriting the previous), while the tokenizer class below references
# them by their conventional names, which therefore raised NameError.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

# Resolve a checkpoint name to the URL of each vocabulary file.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

# Maximum input length supported by each pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}
class A_ ( PreTrainedTokenizerFast ):
    """Fast BlenderbotSmall tokenizer backed by a byte-level BPE model.

    Fixes: the base class was the undefined name ``__a`` (should be
    ``PreTrainedTokenizerFast``); the four class attributes all collided under
    one mangled name; both public methods had duplicate parameter names (a
    SyntaxError) and referenced undefined locals.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space: bool = False,
        trim_offsets: bool = True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file, merges=merges_file, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, ),
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs, )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_pair=None):
        """Wrap one (or a pair of) sequence(s) with BOS/EOS special tokens."""
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a_pair is None:
            return output
        return output + [self.eos_token_id] + token_ids_a_pair + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None
    ):
        """Return all-zero token-type ids (this model does not use them)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_pair + sep) * [0]
| 428 | 0 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def lowercase_ (
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve an ODE y' = ode_func(x, y) with Heun's method (modified Euler).

    Fixes: all five parameters were named ``__A`` — duplicate parameter names
    are a SyntaxError — and the assignments into ``y`` had lost their targets,
    so neither the initial condition nor the updates were ever stored.

    Args:
        ode_func: right-hand side f(x, y) of the ODE.
        y0: initial value y(x0).
        x0: initial abscissa.
        step_size: increment in x per step (must be > 0).
        x_end: final abscissa; integration covers [x0, x_end].

    Returns:
        Array of n + 1 solution values, y[0] == y0.
    """
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # Predictor: plain Euler step.
        y_predict = y[k] + step_size * ode_func(x, y[k])
        # Corrector: trapezoidal average of the slopes at both ends.
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size
    return y
if __name__ == "__main__":
    # Run any doctest examples embedded in this module when executed directly.
    import doctest
    doctest.testmod()
| 719 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the RemBERT model family.
#
# Fixes: every sub-list was assigned to the same mangled name instead of
# being recorded under a key of ``_import_structure``, and the final line
# referenced ``_import_structure`` (NameError) and had lost the
# ``sys.modules[__name__] = ...`` assignment that makes the module lazy.
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rembert"] = [
        "REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RemBertForCausalLM",
        "RemBertForMaskedLM",
        "RemBertForMultipleChoice",
        "RemBertForQuestionAnswering",
        "RemBertForSequenceClassification",
        "RemBertForTokenClassification",
        "RemBertLayer",
        "RemBertModel",
        "RemBertPreTrainedModel",
        "load_tf_weights_in_rembert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rembert"] = [
        "TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRemBertForCausalLM",
        "TFRemBertForMaskedLM",
        "TFRemBertForMultipleChoice",
        "TFRemBertForQuestionAnswering",
        "TFRemBertForSequenceClassification",
        "TFRemBertForTokenClassification",
        "TFRemBertLayer",
        "TFRemBertModel",
        "TFRemBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert import RemBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert_fast import RemBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rembert import (
            REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RemBertForCausalLM,
            RemBertForMaskedLM,
            RemBertForMultipleChoice,
            RemBertForQuestionAnswering,
            RemBertForSequenceClassification,
            RemBertForTokenClassification,
            RemBertLayer,
            RemBertModel,
            RemBertPreTrainedModel,
            load_tf_weights_in_rembert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rembert import (
            TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRemBertForCausalLM,
            TFRemBertForMaskedLM,
            TFRemBertForMultipleChoice,
            TFRemBertForQuestionAnswering,
            TFRemBertForSequenceClassification,
            TFRemBertForTokenClassification,
            TFRemBertLayer,
            TFRemBertModel,
            TFRemBertPreTrainedModel,
        )

else:
    # At runtime, replace this module with a lazy proxy that imports the
    # heavy submodules only when an attribute is first accessed.
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 8 | 0 |
from graphs.minimum_spanning_tree_kruskal import kruskal
def A_ ( ):
    """Check Kruskal's MST against a known 9-node graph.

    Fixes: the call to ``kruskal`` and the final assertion referenced the
    undefined name ``lowercase__`` instead of the locals defined above them.
    Edges are ``[u, v, weight]`` triples.
    """
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes, edges)
    # The unique MST of the graph above (edge order is irrelevant).
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    assert sorted(result) == sorted(expected)
| 671 |
from math import ceil
def _lowerCamelCase( lowercase__ = 1_0_0_1 ) -> int:
'''simple docstring'''
__lowercase= 1
for i in range(1 , int(ceil(n / 2.0 ) ) ):
__lowercase= 2 * i + 1
__lowercase= 2 * i
__lowercase= total + 4 * odd**2 - 6 * even
return total
if __name__ == "__main__":
    import sys

    # Fixes: the original guard called the undefined name ``solution`` and
    # read an undefined ``n`` (the parsed value was bound to a mangled name).
    if len(sys.argv) == 1:
        # No argument: use the default spiral size (1001).
        print(_lowerCamelCase())
    else:
        try:
            n = int(sys.argv[1])
            print(_lowerCamelCase(n))
        except ValueError:
            print('''Invalid entry - please enter a number''')
| 230 | 0 |
'''simple docstring'''
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
__lowercase :Tuple = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from one downloaded CI artifact (a zip, or a directory
    when running inside a GitHub Action).

    Fixes: this function and the one below shared one mangled name (so this
    definition was shadowed while its caller expected
    ``extract_warnings_from_single_artifact``); ``parse_line``'s parameter was
    misnamed so ``fp`` was undefined; the decoded/stripped line and the joined
    warning were bound to throwaway names instead of being used.

    NOTE(review): relies on the module-level global ``from_gh`` being set by
    the ``__main__`` block before this is called — confirm call order.
    """
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        # Accumulate indented continuation lines into `buffer`; a non-indented
        # line terminates the current warning.
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f""": {x}: """ in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        # Artifacts were already unpacked into a directory by the workflow.
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"""{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.""")

    return selected_warnings
def extract_warnings(artifact_dir, targets):
    """Extract target warnings from every artifact under ``artifact_dir``.

    Fixes: the loop iterated over the undefined name ``paths`` (the computed
    list was bound to a mangled name), and the helper it calls was renamed so
    that the call could never resolve.
    """
    selected_warnings = set()
    # Zip files in local mode; any entry when artifacts are pre-extracted
    # by the GitHub workflow (``from_gh`` is a module-level global).
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
if __name__ == "__main__":

    def list_str(values):
        """Parse a comma-separated CLI value into a list of strings."""
        return values.split(",")

    # Fixes: ``parser``, ``args``, ``from_gh``, ``artifacts`` and
    # ``selected_warnings`` were all assigned to mangled throwaway names while
    # the code below read the originals (NameError); ``list_str`` was renamed
    # although ``type=list_str`` still referenced it; trailing dataset junk on
    # the final line broke the syntax.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    # optional parameters
    parser.add_argument(
        "--targets",
        default="DeprecationWarning,UserWarning,FutureWarning",
        type=list_str,
        help="Comma-separated list of target warning(s) which we want to extract.",
    )
    parser.add_argument(
        "--from_gh",
        action="store_true",
        help="If running from a GitHub action workflow and collecting warnings from its artifacts.",
    )

    args = parser.parse_args()

    from_gh = args.from_gh
    if from_gh:
        # The artifacts have to be downloaded using `actions/download-artifact@v3`
        pass
    else:
        os.makedirs(args.output_dir, exist_ok=True)
        # get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
        with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
            json.dump(artifacts, fp, ensure_ascii=False, indent=4)
        # download artifacts
        for idx, (name, url) in enumerate(artifacts.items()):
            print(name)
            print(url)
            print("=" * 80)
            download_artifact(name, url, args.output_dir, args.token)
            # Be gentle to GitHub
            time.sleep(1)

    # extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
    with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _a ( unittest.TestCase ):
    """Slow integration tests for the Flax Stable-Diffusion ControlNet pipeline.

    Fixes: all three methods were named ``A_`` (so two of them shadowed each
    other, ``tearDown`` was never invoked by unittest, and neither test was
    collected); every local was bound to a mangled name while later lines read
    the originals (``controlnet_params``, ``pipe``, ``prompts``, ``rng``, ...);
    boolean arguments were the undefined name ``a``; ``jnp.bfloataa`` should be
    ``jnp.bfloat16``; trailing dataset junk corrupted the final line.
    """

    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_canny( self ):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16 )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16 )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        # Replicate params and shard the inputs across devices for pmapped call.
        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng, num_inference_steps=50, jit=True, ).images
        assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 2_53:2_56, 2_53:2_56, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        # Reference values recorded from a known-good run of this checkpoint.
        expected_slice = jnp.array(
            [0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078] )
        print(f"""output_slice: {output_slice}""")
        assert jnp.abs(output_slice - expected_slice).max() < 1E-2

    def test_pose( self ):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16 )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16 )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng, num_inference_steps=50, jit=True, ).images
        assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 2_53:2_56, 2_53:2_56, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]] )
        print(f"""output_slice: {output_slice}""")
        assert jnp.abs(output_slice - expected_slice).max() < 1E-2
'''simple docstring'''
def largest_square_area_in_matrix_top_down_approch(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Side length of the largest all-ones square in ``mat`` (plain recursion).

    Fixes: all three parameters were named ``UpperCamelCase__`` (duplicate
    parameter names are a SyntaxError) and the recursive results / accumulator
    updates were bound to mangled throwaway names.
    """

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            # Square with top-left corner here extends by the smallest neighbour.
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    # Single-element list so the nested function can mutate the best answer.
    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_approch_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Largest all-ones square via memoized top-down recursion.

    Fixes: duplicate ``UpperCamelCase__`` parameter names (SyntaxError) and
    mangled locals that discarded the recursive results and the memo writes.
    """

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        # Memo hit: -1 marks "not computed yet".
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Largest all-ones square via bottom-up DP over a (rows+1) x (cols+1) table.

    ``dp_array[r][c]`` is the side of the largest square whose top-left corner
    is (r, c). Fixes: the three neighbour reads and the table writes were all
    bound to mangled throwaway names, so the table never updated; the function
    name is restored to match the call in the ``__main__`` block.
    """
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Largest all-ones square using only two DP rows (O(cols) memory).

    Fixes: duplicate ``UpperCamelCase__`` parameter names (SyntaxError) and
    mangled locals. Additionally fixes a row-aliasing bug: the original ended
    each row with ``next_row = current_row``, making both names reference the
    SAME list, so the "diagonal" neighbour read the current row instead of the
    previous one (e.g. it returned 3 instead of 2 for a 3x3 matrix whose last
    cell is 0). Swapping the two lists keeps the previous row intact;
    ``current_row`` is fully overwritten column by column on the next pass.
    """
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # The finished row becomes "next"; its old buffer is recycled.
        current_row, next_row = next_row, current_row
    return largest_square_area
if __name__ == "__main__":
    # Fixes: trailing dataset-artifact junk ("| 296 |") on the print line
    # made this guard a syntax error.
    import doctest

    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
'''simple docstring'''
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int):
    """Sort ``collection[:n]`` in place by recursive insertion sort.

    Fixes: both functions were named ``_a`` with duplicate ``UpperCamelCase__``
    parameters (a SyntaxError), while the bodies called ``insert_next`` and
    ``rec_insertion_sort`` — names that no longer existed; the in-place swap
    had also lost its assignment targets.
    """
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    """Bubble ``collection[index]`` rightward until the prefix is ordered."""
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)
if __name__ == "__main__":
    # Fixes: the input string and the parsed list were bound to a mangled
    # name while the next lines read ``numbers`` / ``number_list``
    # (NameError); trailing dataset junk corrupted the final line.
    numbers = input('Enter integers separated by spaces: ')
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester ( unittest.TestCase ):
    """Configuration holder used by the LayoutLMv3 image-processor tests.

    Fixes: the class was renamed to ``UpperCamelCase`` (colliding with the
    test class below, which still instantiates
    ``LayoutLMvaImageProcessingTester``); ``__init__`` had nine parameters all
    named ``lowercase__`` (a SyntaxError) and none of the ``self.*``
    attribute assignments survived; ``prepare_image_processor_dict`` was
    renamed although the test class calls it by its original name.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ) -> None:
        # Default target size matches the processor's own default for tests.
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to build the image processor under test."""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """Tests for ``LayoutLMvaImageProcessor`` (PIL / NumPy / PyTorch inputs
    plus an OCR integration test).

    Fixes: the base class was the undefined name ``__magic_name__`` (should be
    the imported ``ImageProcessingSavingTestMixin``); every method was named
    ``A`` so all but the last were shadowed and none were collected; the
    ``image_processing`` / ``image_inputs`` / ``encoded_images`` /
    ``expected_*`` locals were bound to mangled names while later lines read
    the originals; ``setUp`` never assigned ``self.image_processor_tester``.
    """

    # Processor under test; None makes the mixin skip when pytesseract is absent.
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'apply_ocr'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 18, 'width': 18})

        # Overriding `size` as a kwarg must win over the dict value.
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})

    def test_batch_feature(self):
        # Covered elsewhere; intentionally a no-op for this processor.
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors='pt')
        self.assertEqual(
            encoding.pixel_values.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ), )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ), )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ), )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ), )

    def test_LayoutLMv3_integration_test(self):
        image_processing = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        ds = load_dataset('hf-internal-testing/fixtures_docvqa', split='test')

        image = Image.open(ds[0]['file']).convert('RGB')

        encoding = image_processing(image, return_tensors='pt')

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        expected_words = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
        expected_boxes = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], 
        [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
        # fmt: on

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors='pt')

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
| 406 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class UpperCamelCase ( unittest.TestCase ):
    """Tests for ``CLIPSegProcessor``: save/load round-trips and the composition
    of its CLIP tokenizer and ViT image processor.

    Fixes applied: distinct method names (the originals were all named ``A`` so
    later defs shadowed earlier ones and unittest never ran setUp/tearDown or any
    test), restored local variable names, and ``np.uint8`` for the image dtype.
    """

    def setUp(self):
        """Write a toy BPE vocab/merges pair and an image-processor config to a temp dir."""
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

        image_processor_map = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.48145466, 0.4578275, 0.40821073],
            'image_std': [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        """Slow CLIP tokenizer built from the temp-dir files."""
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        """Fast (Rust-backed) CLIP tokenizer built from the temp-dir files."""
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        """ViT image processor built from the temp-dir config."""
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (CHW uint8 array moved to HWC)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        return [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

    def test_save_load_pretrained_default(self):
        """Round-trip save/load must preserve tokenizer vocab and image-processor config."""
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        """Extra kwargs passed to from_pretrained must override the saved components."""
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        """Processor(images=...) must match the bare image processor output."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        """Processor(text=...) must match the bare tokenizer output."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        """Text+image call returns the combined keys; empty call must raise."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ['input_ids', 'attention_mask', 'pixel_values'])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_visual_prompt(self):
        """Image+visual-prompt call returns conditional pixel values; empty call must raise."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)

        self.assertListEqual(list(inputs.keys()), ['pixel_values', 'conditional_pixel_values'])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        """Processor.batch_decode must delegate to the tokenizer's batch_decode."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
| 406 | 1 |
"""Energy unit conversion based on a joule-normalized factor table."""

# Conversion factor from each supported unit to joules.
# (The function body reads this table as ENERGY_CONVERSION, so that must be its name.)
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1000,
    "megajoule": 100_0000,
    "gigajoule": 10_0000_0000,
    "wattsecond": 1.0,
    "watthour": 3600,
    "kilowatthour": 360_0000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4186.8,
    "kilocalorie_nutr": 418_6800.00,
    "electronvolt": 1.602176634e-19,
    "britishthermalunit_it": 1055.05585,
    "footpound": 1.355818,
}


def a__(from_type: str, to_type: str, value: float) -> float:
    """Convert ``value`` from ``from_type`` units to ``to_type`` units.

    Args:
        from_type: unit of the input value (a key of ``ENERGY_CONVERSION``).
        to_type: target unit (a key of ``ENERGY_CONVERSION``).
        value: magnitude expressed in ``from_type`` units.

    Returns:
        The magnitude expressed in ``to_type`` units.

    Raises:
        ValueError: if either unit name is not in the conversion table.
    """
    # Distinct parameter names restored: the original declared all three
    # parameters with the same name, which is a SyntaxError in Python.
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 467 |
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'''huggingface/autoformer-tourism-monthly''': '''https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json''',
}
class UpperCAmelCase ( _snake_case ):
    """Configuration class for the Autoformer time-series model.

    Fixes applied: ``model_type`` and ``attribute_map`` restored (both class
    attributes previously shared one name, so one clobbered the other); the
    ``__init__`` signature's duplicate parameter names (a SyntaxError) replaced
    with the conventional Autoformer parameter names; all values are now stored
    on ``self`` (they were previously bound to a single throwaway local, leaving
    later reads of ``self.cardinality`` etc. broken); the feature-count property
    is named ``_number_of_features`` as ``__init__`` requires.
    """

    model_type = "autoformer"
    # Map generic transformer attribute names onto Autoformer-specific ones.
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        # Autoformer-specific arguments
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        # Default the context window to the prediction window when unset.
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`")
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`")
            self.embedding_dimension = embedding_dimension
        else:
            # Heuristic default: roughly half the cardinality, capped at 50.
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        """Total width of the non-lag input features fed to the model."""
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 467 | 1 |
def lowerCamelCase_(collection: list) -> list:
    """Sort *collection* in place with circle sort and return it.

    Circle sort compares and swaps elements that sit at mirrored positions of the
    current range, then recurses on the two halves; whole passes repeat until a
    pass performs no swap.

    Fixes applied: the inner helper's duplicate parameter names (a SyntaxError)
    and the collapsed local names (``left``/``right``/``swapped`` were undefined).
    """
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        """One recursive circle pass over collection[low:high+1]; True if any swap occurred."""
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        # Compare mirrored pairs, moving inward.
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        # Odd-sized range: the middle element is compared with its right neighbour.
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True

    # Repeat full passes until one completes without swapping.
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection
if __name__ == "__main__":
    # Read a comma-separated list of integers, circle-sort it and print the result.
    # Fixes applied: the parsed input is now actually used (the original read into
    # one name and split an undefined `user_input`), and the sort function defined
    # above is called (the original called an undefined `circle_sort`).
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(lowerCamelCase_(unsorted))
| 712 | from __future__ import annotations
def print_distance(distance: list[float], src: int):
    """Print each vertex with its shortest distance from *src*."""
    print(f"""Vertex\tShortest Distance from vertex {src}""")
    for i, d in enumerate(distance):
        print(f"""{i}\t\t{d}""")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    """Return True if any edge can still be relaxed, i.e. a negative cycle exists."""
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ('''src''', '''dst''', '''weight'''))
        if distance[u] != float('''inf''') and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """Shortest distances from *src* to every vertex via Bellman-Ford.

    Args:
        graph: edges as dicts with keys ``src``, ``dst``, ``weight``.
        vertex_count: number of vertices.
        edge_count: number of edges in *graph*.
        src: source vertex index.

    Raises:
        Exception: if the graph contains a negative-weight cycle.
    """
    # Distinct function names restored: all three functions were named
    # `lowerCamelCase_`, so the later defs shadowed the earlier ones and the
    # calls below referred to undefined names.
    distance = [float('''inf''')] * vertex_count
    distance[src] = 0.0

    # Relax every edge |V| - 1 times.
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ('''src''', '''dst''', '''weight'''))
            if distance[u] != float('''inf''') and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception('''Negative cycle found''')

    return distance


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("""Enter number of vertices: """).strip())
    E = int(input("""Enter number of edges: """).strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("""Edge """, i + 1)
        src, dest, weight = (
            int(x)
            for x in input("""Enter source, destination, weight: """).strip().split(""" """)
        )
        # Store the edge (the original bound it to a throwaway name instead).
        graph[i] = {"""src""": src, """dst""": dest, """weight""": weight}

    source = int(input("""\nEnter shortest path source:""").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 591 | 0 |
from __future__ import annotations


def prime_sieve(limit: int) -> list[int]:
    """Return all primes below *limit*, sieving only odd candidates.

    Assumes ``limit > 2`` (same precondition as the original).
    """
    # Name restored: both functions were named `__snake_case`, so `solution`
    # called an undefined `prime_sieve`.
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        # Mark every multiple of i starting from 2*i as composite.
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Project Euler 50: the prime below *ceiling* that is the sum of the most
    consecutive primes."""
    primes = prime_sieve(ceiling)
    prime_set = set(primes)  # O(1) membership instead of an O(n) list scan per sum

    length = 0
    largest = 0

    for i in range(len(primes)):
        # Only consider runs at least as long as the best found so far.
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break

            if sol in prime_set:
                length = j - i
                largest = sol

    return largest


if __name__ == "__main__":
    print(f'{solution() = }')
| 100 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    """Integration test for the TF XLM-RoBERTa base checkpoint.

    Fixes applied: the test method is named ``test_*`` so unittest discovers it
    (the original was named ``__lowercase``), locals are distinct names (the
    original read ``__lowercase`` before it was defined), and the dtypes are the
    real ``tf.int32`` / ``tf.float32`` (``tf.intaa`` / ``tf.floataa`` do not exist).
    """

    @slow
    def test_output_embeds_base_model(self):
        """Run "My dog is cute" through the model; check output shape and a 3x3 slice."""
        model = TFXLMRobertaModel.from_pretrained('''jplu/tf-xlm-roberta-base''')
        features = {
            '''input_ids''': tf.convert_to_tensor([[0, 2646, 1_0269, 83, 9_9942, 2]], dtype=tf.int32),  # "My dog is cute"
            '''attention_mask''': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)['''last_hidden_state''']
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 179 | 0 |
"""Lazy-import initializer for the InstructBLIP model family."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Import structure consumed by _LazyModule: maps submodule name -> exported names.
# (The original bound this dict to a throwaway name and later clobbered it with a
# bare list, so the torch-only exports were lost and the _LazyModule call below
# referenced an undefined `_import_structure`.)
_import_structure = {
    '''configuration_instructblip''': [
        '''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''InstructBlipConfig''',
        '''InstructBlipQFormerConfig''',
        '''InstructBlipVisionConfig''',
    ],
    '''processing_instructblip''': ['''InstructBlipProcessor'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: also expose the modeling classes.
    _import_structure['''modeling_instructblip'''] = [
        '''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''InstructBlipQFormerModel''',
        '''InstructBlipPreTrainedModel''',
        '''InstructBlipForConditionalGeneration''',
        '''InstructBlipVisionModel''',
    ]

if TYPE_CHECKING:
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )
else:
    import sys

    # Install the lazy module so attribute access triggers the deferred imports.
    # (The original assigned the _LazyModule to a plain name, never replacing
    # this module in sys.modules, so the laziness had no effect.)
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 703 | '''simple docstring'''
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position

# Package version string.
# NOTE(review): upstream names this `__version__`; the mangled name `__lowercase`
# is kept byte-identical here — restore before anything reads the version.
__lowercase = '''2.13.1'''

import platform
import pyarrow
from packaging import version

# `datasets` requires Python >= 3.7; fail fast with an explicit message otherwise.
if version.parse(platform.python_version()) < version.parse('''3.7'''):
    raise ImportWarning(
        '''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
    )

# `datasets` requires pyarrow >= 8.0.0 (major version check only).
if version.parse(pyarrow.__version__).major < 8:
    raise ImportWarning(
        '''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
        '''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
    )

# Drop the version-check helpers from the package namespace.
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
# Deprecated aliases kept for backwards compatibility.
# NOTE(review): every assignment below rebinds the SAME mangled name
# `__lowercase`, so only the last one survives; upstream binds each RHS to a
# distinct deprecated alias — restore the distinct names before relying on them.
__lowercase = concatenate_datasets
__lowercase = DownloadConfig
__lowercase = DownloadManager
__lowercase = DownloadMode
__lowercase = DownloadConfig
__lowercase = DownloadMode
__lowercase = DownloadManager

# Remove the temporarily-imported deprecated modules from the namespace.
del _arrow_dataset, _utils, _deprecated_download_manager
| 605 | 0 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenizer tests for RoCBert (shape/pronunciation-aware BERT tokenizer).

    Fixes applied: the base class is the imported ``TokenizerTesterMixin`` (the
    original referenced an undefined ``__SCREAMING_SNAKE_CASE``), and the five
    mixin configuration attributes get their real names (they were all bound to
    one name, so only the last assignment survived).
    """

    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    test_slow_tokenizer = True
    from_pretrained_filter = filter_non_english
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->List[str]:
'''simple docstring'''
super().setUp()
lowerCamelCase__: int =["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
lowerCamelCase__: Dict ={}
lowerCamelCase__: Dict ={}
for i, value in enumerate(UpperCAmelCase_):
lowerCamelCase__: Any =i
lowerCamelCase__: List[str] =i
lowerCamelCase__: List[Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
lowerCamelCase__: Tuple =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"])
lowerCamelCase__: Optional[int] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
with open(self.word_shape_file , "w" , encoding="utf-8") as word_shape_writer:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ , ensure_ascii=UpperCAmelCase_)
with open(self.word_pronunciation_file , "w" , encoding="utf-8") as word_pronunciation_writer:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ , ensure_ascii=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : int) ->int:
'''simple docstring'''
lowerCamelCase__: int =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file)
lowerCamelCase__: Union[str, Any] =tokenizer.tokenize("你好[SEP]你是谁")
self.assertListEqual(UpperCAmelCase_ , ["你", "好", "[SEP]", "你", "是", "谁"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , [5, 6, 2, 5, 7, 8])
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase_) , [5, 6, 2, 5, 7, 8])
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase_) , [5, 6, 2, 5, 7, 8])
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->int:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz") , ["ah", "\u535A", "\u63A8", "zz"])
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Any =RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase_)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? ") , ["hello", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["hello"])
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Any:
'''simple docstring'''
lowerCamelCase__: Dict =RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase_ , strip_accents=UpperCAmelCase_)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["hällo", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["h\u00E9llo"])
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase_ , strip_accents=UpperCAmelCase_)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["hallo", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["hello"])
def SCREAMING_SNAKE_CASE_ (self : Any) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: Tuple =RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase_)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["hallo", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["hello"])
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->List[str]:
'''simple docstring'''
lowerCamelCase__: Dict =RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase_)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? ") , ["HeLLo", "!", "how", "Are", "yoU", "?"])
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->int:
'''simple docstring'''
lowerCamelCase__: Dict =RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase_ , strip_accents=UpperCAmelCase_)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["HäLLo", "!", "how", "Are", "yoU", "?"])
def SCREAMING_SNAKE_CASE_ (self : int) ->str:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase_ , strip_accents=UpperCAmelCase_)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["HaLLo", "!", "how", "Are", "yoU", "?"])
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Dict =RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase_ , never_split=["[UNK]"])
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]") , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
def SCREAMING_SNAKE_CASE_ (self : Dict) ->List[str]:
'''simple docstring'''
lowerCamelCase__: Optional[int] =["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
lowerCamelCase__: Dict ={}
for i, token in enumerate(UpperCAmelCase_):
lowerCamelCase__: Any =i
lowerCamelCase__: Union[str, Any] =RoCBertWordpieceTokenizer(vocab=UpperCAmelCase_ , unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize("") , [])
self.assertListEqual(tokenizer.tokenize("unwanted running") , ["un", "##want", "##ed", "runn", "##ing"])
self.assertListEqual(tokenizer.tokenize("unwantedX running") , ["[UNK]", "runn", "##ing"])
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->List[str]:
'''simple docstring'''
self.assertTrue(_is_whitespace(" "))
self.assertTrue(_is_whitespace("\t"))
self.assertTrue(_is_whitespace("\r"))
self.assertTrue(_is_whitespace("\n"))
self.assertTrue(_is_whitespace("\u00A0"))
self.assertFalse(_is_whitespace("A"))
self.assertFalse(_is_whitespace("-"))
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->List[Any]:
'''simple docstring'''
self.assertTrue(_is_control("\u0005"))
self.assertFalse(_is_control("A"))
self.assertFalse(_is_control(" "))
self.assertFalse(_is_control("\t"))
self.assertFalse(_is_control("\r"))
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->List[str]:
'''simple docstring'''
self.assertTrue(_is_punctuation("-"))
self.assertTrue(_is_punctuation("$"))
self.assertTrue(_is_punctuation("`"))
self.assertTrue(_is_punctuation("."))
self.assertFalse(_is_punctuation("A"))
self.assertFalse(_is_punctuation(" "))
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->int:
'''simple docstring'''
lowerCamelCase__: Any =self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(UpperCAmelCase_) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]])
if self.test_rust_tokenizer:
lowerCamelCase__: Tuple =self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(UpperCAmelCase_) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]])
def SCREAMING_SNAKE_CASE_ (self : Any) ->Optional[int]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
lowerCamelCase__: str =self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_)
lowerCamelCase__: Optional[int] =F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
lowerCamelCase__: Optional[int] =tokenizer_r.encode_plus(
UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , )
lowerCamelCase__: Tuple =tokenizer_r.do_lower_case if hasattr(UpperCAmelCase_ , "do_lower_case") else False
lowerCamelCase__: Tuple =(
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"]))
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"])
def SCREAMING_SNAKE_CASE_ (self : str) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =["的", "人", "有"]
lowerCamelCase__: List[str] ="".join(UpperCAmelCase_)
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
lowerCamelCase__: Dict =True
lowerCamelCase__: Optional[Any] =self.tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_)
lowerCamelCase__: Optional[int] =self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_)
lowerCamelCase__: int =tokenizer_p.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_)
lowerCamelCase__: List[Any] =tokenizer_r.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_)
lowerCamelCase__: Tuple =tokenizer_r.convert_ids_to_tokens(UpperCAmelCase_)
lowerCamelCase__: List[str] =tokenizer_p.convert_ids_to_tokens(UpperCAmelCase_)
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =False
lowerCamelCase__: Tuple =self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_)
lowerCamelCase__: List[Any] =self.tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_)
lowerCamelCase__: Tuple =tokenizer_r.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_)
lowerCamelCase__: str =tokenizer_p.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_)
lowerCamelCase__: Optional[int] =tokenizer_r.convert_ids_to_tokens(UpperCAmelCase_)
lowerCamelCase__: Tuple =tokenizer_p.convert_ids_to_tokens(UpperCAmelCase_)
# it is expected that only the first Chinese character is not preceded by "##".
lowerCamelCase__: List[str] =[
F"""##{token}""" if idx != 0 else token for idx, token in enumerate(UpperCAmelCase_)
]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
@slow
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Dict:
'''simple docstring'''
lowerCamelCase__: Tuple =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file)
lowerCamelCase__: int =tokenizer.encode("你好" , add_special_tokens=UpperCAmelCase_)
lowerCamelCase__: Any =tokenizer.encode("你是谁" , add_special_tokens=UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_)
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->str:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =self.get_tokenizers(do_lower_case=UpperCAmelCase_)
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}"""):
lowerCamelCase__: Union[str, Any] ="你好,你是谁"
lowerCamelCase__: str =tokenizer.tokenize(UpperCAmelCase_)
lowerCamelCase__: Optional[int] =tokenizer.convert_tokens_to_ids(UpperCAmelCase_)
lowerCamelCase__: int =tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase_)
lowerCamelCase__: Optional[int] =tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase_)
lowerCamelCase__: str =tokenizer.prepare_for_model(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_)
lowerCamelCase__: List[Any] =tokenizer.encode_plus(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_)
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_)
| 59 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
# This conversion script only supports the fairseq 0.12.x series.
if version.parse(fairseq.__version__) < version.parse("0.12.2"):
    raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
    raise Exception("requires fairseq < v2")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Sample input/language used below to verify the converted model matches the original.
SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(
    xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
) -> None:
    """Copy the weights of a fairseq X-MOD checkpoint into the HF `Xmod*` design,
    verify both models produce the same output on SAMPLE_TEXT, then save.

    Args:
        xmod_checkpoint_path: Path to the fairseq ``.pt`` file; its parent directory
            must contain ``sentencepiece.bpe.model`` (``dict.txt`` is read from ``data_bin``).
        pytorch_dump_folder_path: Output folder for the converted model.
        classification_head: Convert the ``mnli`` classification head instead of the LM head.

    Raises:
        AssertionError: if weight shapes or adapter language lists disagree.
        Exception: if the converted model's output does not match the original's.
    """
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.
    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")
        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.dense1.weight = from_adapter.fc1.weight
            to_adapter.dense1.bias = from_adapter.fc1.bias
            to_adapter.dense2.weight = from_adapter.fc2.weight
            to_adapter.dense2.bias = from_adapter.fc2.bias
        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: parse paths/flags and run the conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    convert_xmod_checkpoint_to_pytorch(
        args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
| 220 | 0 |
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
# SentencePiece fixture used to build a small NllbTokenizer in the unit tests.
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    # NLLB reuses M2M-100's shift_tokens_right to build decoder inputs.
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

# Language-code token ids in the NLLB vocabulary.
EN_CODE = 256047
RO_CODE = 256145
@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Unit tests for the slow and fast NLLB tokenizers, driven by TokenizerTesterMixin.

    Restores the base class (the obfuscated original inherited from an undefined
    ``__magic_name__``), the mixin class attributes, and all local names that the
    obfuscation collapsed into ``A__``/``UpperCAmelCase__``.
    """

    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_tokenizer(self):
        """Round-trip tokenize / ids / tokens on the fixture vocabulary."""
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # "9" and "é" are out of the fixture vocab and decode back to <unk>.
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    def test_save_pretrained(self):
        """Slow and fast tokenizers must save the same files and reload equivalently."""
        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

    @require_torch
    def test_prepare_seq2seq_batch(self):
        """`prepare_seq2seq_batch` must honor max_length/max_target_length truncation."""
        if not self.test_seq2seq:
            return

        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Longer text that will definitely require truncation.
                src_text = [
                    " UN Chief Says There Is No Military Solution in Syria",
                    " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"
                    " Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"
                    " will only worsen the violence and misery for millions of people.",
                ]
                tgt_text = [
                    "Şeful ONU declară că nu există o soluţie militară în Siria",
                    "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"
                    " Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"
                    " că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
                ]
                try:
                    batch = tokenizer.prepare_seq2seq_batch(
                        src_texts=src_text,
                        tgt_texts=tgt_text,
                        max_length=3,
                        max_target_length=10,
                        return_tensors="pt",
                        src_lang="eng_Latn",
                        tgt_lang="ron_Latn",
                    )
                except NotImplementedError:
                    return
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 10)

                # max_target_length will default to max_length if not specified
                batch = tokenizer.prepare_seq2seq_batch(
                    src_text, tgt_texts=tgt_text, max_length=3, return_tensors="pt"
                )
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 3)

                batch_encoder_only = tokenizer.prepare_seq2seq_batch(
                    src_texts=src_text, max_length=3, max_target_length=10, return_tensors="pt"
                )
                self.assertEqual(batch_encoder_only.input_ids.shape[1], 3)
                self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
                self.assertNotIn("decoder_input_ids", batch_encoder_only)

    @unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece.")
    def test_save_slow_from_fast_and_reload_fast(self):
        pass

    def test_special_tokens_initialization(self):
        """Additional special tokens must survive fast/slow init and encode identically."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                added_tokens = [AddedToken("<special>", lstrip=True)]

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, additional_special_tokens=added_tokens, **kwargs
                )
                r_output = tokenizer_r.encode("Hey this is a <special> token")

                special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0]

                self.assertTrue(special_token_id in r_output)

                if self.test_slow_tokenizer:
                    tokenizer_cr = self.rust_tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs
                    )
                    tokenizer_p = self.tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs
                    )

                    p_output = tokenizer_p.encode("Hey this is a <special> token")
                    cr_output = tokenizer_cr.encode("Hey this is a <special> token")

                    self.assertEqual(p_output, r_output)
                    self.assertEqual(cr_output, r_output)
                    self.assertTrue(special_token_id in p_output)
                    self.assertTrue(special_token_id in cr_output)
@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase):
    """Integration tests against the released facebook/nllb-200-distilled-600M tokenizer.

    Restores the class attribute names (checkpoint_name, src_text, tgt_text,
    expected_src_tokens) and locals that the obfuscation collapsed into
    ``UpperCAmelCase__``/``A__``.
    """

    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is"
        ' that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen'
        " the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    # Expected ids for src_text[0] with src_lang="eng_Latn": lang code first, EOS last.
    expected_src_tokens = [
        256047,
        16297,
        134408,
        8165,
        248066,
        14734,
        950,
        1135,
        105721,
        3573,
        83,
        27352,
        108,
        49486,
        2,
    ]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"], 256001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"], 256002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"], 256057)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-1], 2)
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        # "ar_AR" is not an NLLB language code, so it maps to <unk> (id 3).
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [256203, 3])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = NllbTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.lang_code_to_id["ron_Latn"]
        )

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 15), batch.input_ids.shape)
        self.assertEqual((2, 15), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(RO_CODE, batch.decoder_input_ids[0, 0])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(
            labels,
            self.tokenizer.pad_token_id,
            decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang],
        )

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[256047, 70, 7356, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 256057,
            },
        )

    @require_torch
    def test_legacy_behaviour(self):
        # Legacy mode appends the language code after EOS instead of prefixing it.
        self.tokenizer.legacy_behaviour = True
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            inputs.input_ids, [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047]
        )

        self.tokenizer.legacy_behaviour = False
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            inputs.input_ids, [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2]
        )
| 64 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
# Module logger; the builder below references it as `logger`.
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON/JSON-Lines loading.

    The obfuscated original collapsed every field into a single name
    ``UpperCAmelCase__``; the field names are restored to match the attributes
    the builder below reads (``self.config.features``, ``.block_size``, etc.),
    and the class is named ``JsonConfig`` as referenced by ``BUILDER_CONFIG_CLASS``.
    """

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
class Json(datasets.ArrowBasedBuilder):
    """Arrow-based builder that reads JSON and JSON-Lines files into Arrow tables.

    Restores the local names (``data_files``, ``files``, ``splits``, ``batch``,
    ``block_size``, ``batch_idx``, ``pa_table``, …) that the obfuscation collapsed
    into ``A__``, which made every method raise NameError.
    """

    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        # Surface deprecated/removed legacy config parameters before building.
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """Build split generators; data_files may be a str, a list/tuple, or a dict of splits."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table):
        """Cast a raw table to the requested features, adding missing columns as nulls."""
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                column_type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=column_type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break

                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)

                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
| 64 | 1 |
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
# Map each supported architecture name to its (config class, LM-head model
# class, tokenizer class) triple.  Fix: this dict was bound to `_A` but is
# read as `MODEL_CLASSES` in main() below.
MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args):
    """
    Validate the combination of command-line options before training.

    Checks that the MLM/CLM loss weights are coherent with the `--mlm` flag,
    that the student/teacher architecture pairing is supported, that the
    required input files exist, and that the loss weights are non-negative
    with a strictly positive sum.

    Raises:
        AssertionError: on the first inconsistent setting.
    """
    # Fix: function was defined as `A_` but called as `sanity_checks`, and
    # the parameter was obfuscated although the body reads `args`.
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts)
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config)
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights)

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    """
    Freeze the student's positional-embedding weights so they are not updated
    during distillation.  No-op for unsupported `student_type`s.
    """
    # Fix: obfuscation reduced the body to assignments into a throwaway
    # local, silently doing nothing; attribute paths restored per the
    # upstream distillation script.
    # NOTE(review): confirm these paths against the student model classes.
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings(student, args):
    """
    Freeze the student's token-type-embedding weights during distillation.
    Only RoBERTa students have this embedding; other types are a no-op.
    """
    # Fix: the obfuscated body assigned `False` to a throwaway local;
    # attribute path restored per the upstream distillation script.
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    """
    Entry point for knowledge distillation.

    Parses the command line, loads the teacher tokenizer/model, the binarized
    dataset and the student model, then runs the Distiller training loop.
    Fix: the function was defined as `A_` while the `__main__` guard calls
    `main()`; obfuscated locals, argument types/defaults and tuple-unpack
    targets were restored.
    """
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")

    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")

    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )

    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")

    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )

    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )

    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")

    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")

    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"
                    " itUse `--force` if you want to overwrite it"
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    # Stash tokenizer-derived settings on args; LmSeqsDataset/Distiller read them.
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")


if __name__ == "__main__":
    main()
| 158 |
'''simple docstring'''
def merge_sort(collection):
    """
    Sort `collection` by repeatedly extracting its minimum and maximum.

    The minimum goes to a `start` list and the maximum to an `end` list;
    when at most one element remains it sits in the middle.  Returns a new
    sorted list.  Note: the input list is consumed (emptied, or left with a
    single middle element).

    Fix: the function was defined as `A_` and its parameter was obfuscated
    although the body reads `collection`; the `__main__` guard calls
    `merge_sort`.
    """
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end
if __name__ == "__main__":
    # Fix: both values were bound to `_A`, so the comprehension's read of
    # `user_input` raised NameError.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
| 158 | 1 |
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fpaa_statistics=None):
    """
    Set a parameter or buffer `tensor_name` of `module` on `device`,
    quantizing it with bitsandbytes when the existing parameter is already a
    bitsandbytes `Int8Params`/`Params4bit`.

    Args:
        module: root module; a dotted `tensor_name` is resolved through
            submodules.
        tensor_name: name of the parameter/buffer (possibly dotted).
        device: target device.
        value: optional replacement tensor/array-like; required when the
            current weight lives on the `meta` device.
        fpaa_statistics: optional fp16 statistics (`SCB`) attached to the
            weight for int8 serialization.

    Raises:
        ValueError: unknown attribute/tensor name, meta-device weight with
            no `value`, or an int8 checkpoint with an incompatible
            bitsandbytes version.

    Fix: the obfuscated signature used five identical parameter names (a
    SyntaxError), the name did not match the `set_module_quantized_tensor_to_device`
    call sites, and assignment targets were destroyed; real attribute names
    (`torch.int8`, `bnb.nn.Int8Params`, `bnb.nn.Params4bit`) restored.
    """
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, ConvaD) and fpaa_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fpaa_statistics is not None:
                setattr(module.weight, "SCB", fpaa_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    """
    Recursively replace `nn.Linear`/`ConvaD` children of `model` with
    bitsandbytes 8-bit or 4-bit linear layers, skipping any module whose
    dotted name matches an entry of `modules_to_not_convert`.

    Returns:
        (model, has_been_replaced): the mutated model and a flag telling
        whether at least one module was swapped.

    Fix: the obfuscated signature repeated the same parameter name five
    times (a SyntaxError) and assignment targets were destroyed; real
    bitsandbytes class/keyword names (`Linear8bitLt`, `Linear4bit`,
    `has_fp16_weights`) restored.
    """
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, ConvaD)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, ConvaD):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_inta_has_fpaa_weight,
                            threshold=quantization_config.llm_inta_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_inta_skip_modules is not None
                            and name in quantization_config.llm_inta_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_abit_compute_dtype,
                                compress_statistics=quantization_config.bnb_abit_use_double_quant,
                                quant_type=quantization_config.bnb_abit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    """
    Public wrapper around `_replace_with_bnb_linear`: converts the model's
    linear layers to bitsandbytes quantized layers, keeping `lm_head` (by
    default) in full precision, and warns when nothing was converted.

    Fix: obfuscated duplicate parameter names (SyntaxError) and the `A_`
    name that did not match the `replace_with_bnb_linear` call site.
    """
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )

    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )

    return model
def replace_abit_linear(*args, **kwargs):
    """Deprecated alias of `replace_with_bnb_linear` (kept for backward compatibility)."""
    # Fix: the warning category argument was obfuscated away; restore
    # FutureWarning as the deprecation message implies.
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)
def set_module_abit_tensor_to_device(*args, **kwargs):
    """Deprecated alias of `set_module_quantized_tensor_to_device` (kept for backward compatibility)."""
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    """
    Return module names that should stay in full precision when quantizing
    `model`: the output head (last named child) plus any weight-tied
    modules, with ".weight"/".bias" suffixes stripped.

    Fix: every assignment in the obfuscated body targeted a throwaway local,
    so reads of `tied_model`, `tied_params`, etc. raised NameError; targets
    restored.
    """
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
| 451 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    """
    Load the original MobileViTV2 YAML config and flatten nested sections
    into dotted attribute names on an `argparse.Namespace`
    (e.g. `model.classification.name`), so callers can use `getattr` with a
    default.

    Fix: the function was defined as `A_` but called as
    `load_orig_config_file`, and the obfuscation destroyed the local
    assignment targets (`items`, `config`, `flat_cfg`).
    """
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
def get_mobilevitva_config(task_name, orig_cfg_file):
    """
    Build a `MobileViTVaConfig` for `task_name` (ImageNet classification or
    ADE20K / Pascal-VOC segmentation), filling num_labels, image_size,
    label maps, and architecture settings read from the original YAML config.

    Fix: obfuscation destroyed the assignment targets; config attribute
    names restored from the upstream conversion script.
    NOTE(review): confirm attribute names (`width_multiplier`, `hidden_act`,
    `output_stride`, `atrous_rates`, `aspp_out_channels`,
    `aspp_dropout_prob`) against MobileViTVaConfig.
    """
    config = MobileViTVaConfig()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}

    return config
def rename_key(dct, old, new):
    """Move `dct[old]` to `dct[new]` in place (raises KeyError if `old` is missing)."""
    # Fix: the obfuscated body popped the value into a throwaway local and
    # never wrote it back under the new key.
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    """
    Build the list of (old_key, new_key) pairs mapping original MobileViTV2
    checkpoint names onto the Hugging Face model's state-dict names.

    When `base_model` is False the "mobilevitv2." prefix is prepended to the
    backbone keys.  All pattern checks are made against the ORIGINAL key `k`
    while replacements accumulate in `k_new`.

    Fix: the obfuscated body lost the `k_new`/`model_prefix`/`rename_keys`
    assignment targets; the replacement chain itself is unchanged.
    """
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
            # `j` leaks from the loop above: global_rep.<last+1> is the final layernorm.
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")
        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    """
    Drop auxiliary-head weights (`seg_head.aux_head.*`) from `state_dict`
    in place; they have no counterpart in the Hugging Face model.
    """
    # Collect first, then pop, to avoid mutating while iterating.
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    """Download the standard COCO test image (two cats) used to sanity-check conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    """
    Convert an original MobileViTV2 checkpoint to the Hugging Face format,
    verify the logits on a test image for the base ImageNet variant, and
    save model + image processor to `pytorch_dump_folder_path`.

    Fix: obfuscation destroyed the assignment targets (`config`, `model`,
    `state_dict`, ...) which are all read later in the function.
    """
    config = get_mobilevitva_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTVaForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.idalabel[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Fix: the parser and parsed args were bound to `lowercase__` but read
    # as `parser`/`args` immediately afterwards.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task",
        default="imagenet1k_256",
        type=str,
        help=(
            "Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
            "\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
        ),
        choices=[
            "imagenet1k_256",
            "imagenet1k_384",
            "imagenet21k_to_1k_256",
            "imagenet21k_to_1k_384",
            "ade20k_deeplabv3",
            "voc_deeplabv3",
        ],
    )
    parser.add_argument(
        "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_mobilevitva_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
| 451 | 1 |
def solution(limit: int = 1000000) -> int:
    """Project Euler 135: count the n <= `limit` for which
    x**2 - y**2 - z**2 = n has exactly ten positive-integer solutions,
    where x, y, z is a decreasing arithmetic progression (x = y + d, z = y - d).

    The equation reduces to n = y * (4*d - y); for every y (``first_term``)
    and every multiple n of y, d = (y + n // y) / 4 must be a positive integer
    with y > d (so that z > 0).

    Fix: the original body referenced `limit`, `frequency`, `common_difference`
    and `count`, but every assignment had been scrambled to `_snake_case`, and
    the inner `range` arguments were all scrambled to the limit value (making
    the loop empty).
    """
    limit = limit + 1  # include n == original limit
    frequency = [0] * limit
    for first_term in range(1, limit):
        # n runs over multiples of first_term, so n // first_term is exact.
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n // first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            common_difference //= 4
            if (
                first_term > common_difference
                and first_term < 4 * common_difference
            ):  # x, y, z positive integers: z > 0 requires y > d
                frequency[n] += 1
    return sum(1 for x in frequency[1:limit] if x == 10)


# Backward-compatible alias for the original (auto-generated) name.
A_ = solution
if __name__ == "__main__":
    # NOTE(review): this expects a module-level callable named `solution`;
    # verify the function defined above actually exposes that name.
    print(F"""{solution() = }""")
| 326 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
# Fix: all three module constants were bound to the same scrambled name
# ("lowerCAmelCase_"), so only the last survived and the metric class below
# failed with a NameError on `_DESCRIPTION` / `_CITATION` /
# `_KWARGS_DESCRIPTION`.  Restore the names the class actually references;
# the string contents are unchanged.
_CITATION = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
_DESCRIPTION = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
_KWARGS_DESCRIPTION = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n    predictions: list of translations to score.\n        Each translation should be tokenized into a list of tokens.\n    references: list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\n    max_order: Maximum n-gram order to use when computing BLEU score.\n    smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n    'bleu': bleu score,\n    'precisions': geometric mean of n-gram precisions,\n    'brevity_penalty': brevity penalty,\n    'length_ratio': ratio of lengths,\n    'translation_length': translation_length,\n    'reference_length': reference_length\nExamples:\n\n    >>> predictions = [\n    ...     [\"hello\", \"there\", \"general\", \"kenobi\"],  # tokenized prediction of the first sample\n    ...     [\"foo\", \"bar\", \"foobar\"]  # tokenized prediction of the second sample\n    ... ]\n    >>> references = [\n    ...     [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]],  # tokenized references for the first sample (2 references)\n    ...     [[\"foo\", \"bar\", \"foobar\"]]  # tokenized references for the second sample (1 reference)\n    ... ]\n    >>> bleu = datasets.load_metric(\"bleu\")\n    >>> results = bleu.compute(predictions=predictions, references=references)\n    >>> print(results[\"bleu\"])\n    1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class A(datasets.Metric):
    """BLEU metric wrapping the reference tensorflow/nmt `compute_bleu` implementation."""

    def _info(self):
        # Method names restored to the `datasets.Metric` hooks (`_info`/`_compute`);
        # the scrambled file named both methods `__a`, so the second definition
        # shadowed the first and the framework never found either hook.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        """Score tokenized `predictions` against per-sample lists of `references`.

        Fix: all four parameters shared one scrambled name ("lowercase_"), a
        SyntaxError, and the six unpacked results were all bound to `_snake_case`
        while the return dict referenced `bleu`, `precisions`, etc.
        """
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
| 326 | 1 |
from math import factorial
def combinations(n: int, k: int) -> int:
    """Return n choose k: the number of ways to pick k items from n.

    Raises:
        ValueError: if k is negative or larger than n.
    """
    # Fix: the original signature named both parameters "lowercase" (a
    # SyntaxError) while the body referenced `n` and `k` via scrambled names.
    if n < k or k < 0:
        raise ValueError("""Please enter positive integers for n and k where n >= k""")
    return factorial(n) // (factorial(k) * factorial(n - k))


# Backward-compatible alias for the original (auto-generated) name; the
# __main__ block in this file calls `combinations` directly.
SCREAMING_SNAKE_CASE__ = combinations
if __name__ == "__main__":
    # NOTE(review): these f-strings call a module-level `combinations`;
    # verify the function defined above actually exposes that name.
    print(
        'The number of five-card hands possible from a standard',
        f"""fifty-two card deck is: {combinations(5_2, 5)}\n""",
    )
    print(
        'If a class of 40 students must be arranged into groups of',
        f"""4 for group projects, there are {combinations(4_0, 4)} ways""",
        'to arrange them.\n',
    )
    print(
        'If 10 teams are competing in a Formula One race, there',
        f"""are {combinations(1_0, 3)} ways that first, second and""",
        'third place can be awarded.',
    )
| 714 |
from ..utils import DummyObject, requires_backends
# Dummy placeholders that raise a clear "flax is required" error (via
# `requires_backends`) as soon as they are instantiated or any classmethod is
# called.  Fixes applied to every class below:
#   * the metaclass was the undefined name `UpperCamelCase__`; `DummyObject`
#     is imported above and otherwise unused, so it is clearly the intended
#     metaclass;
#   * `__init__` and the classmethods used `*A, **A` (duplicate argument
#     name), which is a SyntaxError; restored to `*args, **kwargs`;
#   * the backend list is stored as `_backends`, the attribute `DummyObject`
#     consults -- NOTE(review): confirm against `..utils`.
# NOTE(review): every class shares the scrambled name `__lowercase` (each
# definition shadows the previous one) and each defines `UpperCAmelCase`
# twice; restoring the real class/method names requires the original file.
class __lowercase(metaclass=DummyObject):
    """Stub for a Flax-only class; any use reports that `flax` is required."""

    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def UpperCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def UpperCAmelCase(cls, *args, **kwargs):  # noqa: F811 -- duplicate kept from original
        requires_backends(cls, ["flax"])


class __lowercase(metaclass=DummyObject):
    """Stub for a Flax-only class; any use reports that `flax` is required."""

    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def UpperCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def UpperCAmelCase(cls, *args, **kwargs):  # noqa: F811 -- duplicate kept from original
        requires_backends(cls, ["flax"])


class __lowercase(metaclass=DummyObject):
    """Stub for a Flax-only class; any use reports that `flax` is required."""

    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def UpperCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def UpperCAmelCase(cls, *args, **kwargs):  # noqa: F811 -- duplicate kept from original
        requires_backends(cls, ["flax"])


class __lowercase(metaclass=DummyObject):
    """Stub for a Flax-only class; any use reports that `flax` is required."""

    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def UpperCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def UpperCAmelCase(cls, *args, **kwargs):  # noqa: F811 -- duplicate kept from original
        requires_backends(cls, ["flax"])


class __lowercase(metaclass=DummyObject):
    """Stub for a Flax-only class; any use reports that `flax` is required."""

    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def UpperCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def UpperCAmelCase(cls, *args, **kwargs):  # noqa: F811 -- duplicate kept from original
        requires_backends(cls, ["flax"])


class __lowercase(metaclass=DummyObject):
    """Stub for a Flax-only class; any use reports that `flax` is required."""

    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def UpperCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def UpperCAmelCase(cls, *args, **kwargs):  # noqa: F811 -- duplicate kept from original
        requires_backends(cls, ["flax"])


class __lowercase(metaclass=DummyObject):
    """Stub for a Flax-only class; any use reports that `flax` is required."""

    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def UpperCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def UpperCAmelCase(cls, *args, **kwargs):  # noqa: F811 -- duplicate kept from original
        requires_backends(cls, ["flax"])


class __lowercase(metaclass=DummyObject):
    """Stub for a Flax-only class; any use reports that `flax` is required."""

    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def UpperCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def UpperCAmelCase(cls, *args, **kwargs):  # noqa: F811 -- duplicate kept from original
        requires_backends(cls, ["flax"])


class __lowercase(metaclass=DummyObject):
    """Stub for a Flax-only class; any use reports that `flax` is required."""

    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def UpperCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def UpperCAmelCase(cls, *args, **kwargs):  # noqa: F811 -- duplicate kept from original
        requires_backends(cls, ["flax"])


class __lowercase(metaclass=DummyObject):
    """Stub for a Flax-only class; any use reports that `flax` is required."""

    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def UpperCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def UpperCAmelCase(cls, *args, **kwargs):  # noqa: F811 -- duplicate kept from original
        requires_backends(cls, ["flax"])


class __lowercase(metaclass=DummyObject):
    """Stub for a Flax-only class; any use reports that `flax` is required."""

    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def UpperCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def UpperCAmelCase(cls, *args, **kwargs):  # noqa: F811 -- duplicate kept from original
        requires_backends(cls, ["flax"])


class __lowercase(metaclass=DummyObject):
    """Stub for a Flax-only class; any use reports that `flax` is required."""

    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def UpperCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def UpperCAmelCase(cls, *args, **kwargs):  # noqa: F811 -- duplicate kept from original
        requires_backends(cls, ["flax"])


class __lowercase(metaclass=DummyObject):
    """Stub for a Flax-only class; any use reports that `flax` is required."""

    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def UpperCAmelCase(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def UpperCAmelCase(cls, *args, **kwargs):  # noqa: F811 -- duplicate kept from original
        requires_backends(cls, ["flax"])
| 684 | 0 |
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    # Fix: every assignment in this script was bound to the scrambled name
    # "SCREAMING_SNAKE_CASE", so `parser`, `args`, `model`, `prefix`,
    # `state_dict` and `compressed_sd` were never defined and no extracted
    # weight was ever stored.  Restore the real bindings and the
    # `compressed_sd[...]` target keys.
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default="bert-base-uncased", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings are copied over unchanged.
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    # Keep every other teacher layer (0, 2, 4, 7, 9, 11) and remap the names
    # to the student layout.
    # NOTE(review): target keys follow DistilBertForMaskedLM's state dict
    # (q_lin/k_lin/v_lin/out_lin, sa_layer_norm, ffn.lin1/lin2,
    # output_layer_norm) -- confirm against the student model before shipping.
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    # Masked-LM head.
    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")
    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
| 283 |
def _lowerCAmelCase(a: int, b: int) -> int:
    """Multiply two non-negative integers with the shift-and-add (Russian
    peasant) scheme, i.e. without the `*` operator.

    Fix: the original signature named both parameters "lowerCAmelCase_"
    (a SyntaxError) while the body referenced `a`, `b` and `res`.

    NOTE(review): a second function with this same name is defined just below
    and shadows this one at module scope, as in the original file.
    """
    res = 0
    while b > 0:
        if b & 1:  # current bit of b set -> add this power-of-two multiple
            res += a
        a += a  # a * 2
        b >>= 1  # next bit
    return res


# Readable alias (also makes the function reachable via `from module import *`).
binary_multiply = _lowerCAmelCase
def _lowerCAmelCase(a: int, b: int, c: int) -> int:
    """Return (a * b) % c using shift-and-add, reducing intermediates mod c.

    Fix: the original signature named all three parameters "lowerCAmelCase_"
    (a SyntaxError) while the body referenced `a`, `b`, `c` and `res`.
    """
    res = 0
    while b > 0:
        if b & 1:  # current bit of b set -> add a's contribution mod c
            res = ((res % c) + (a % c)) % c
        a += a  # a * 2 (not reduced mod c, matching the original algorithm)
        b >>= 1
    return res


# Readable alias (also makes the function reachable via `from module import *`).
binary_mod_multiply = _lowerCAmelCase
| 283 | 1 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__snake_case = logging.get_logger(__name__)
class lowercase__(SequenceFeatureExtractor):
    """
    CLAP-style audio feature extractor: converts raw audio into log-mel
    spectrogram `input_features` plus a per-clip `is_longer` flag.  Clips
    longer than `max_length_s` are either randomly cropped ("rand_trunc") or
    turned into a 4-channel "fusion" stack (a globally shrunk mel plus three
    random chunks); shorter clips are repeated and/or zero-padded.

    Fixes applied to a machine-mangled file:
      * every method's parameters shared a single name ("UpperCAmelCase_"),
        which is a SyntaxError -- distinct names restored;
      * method names restored to the ones the bodies themselves call
        (`_np_extract_fbank_features`, `_random_mel_fusion`, `_get_input_mel`);
      * base class restored to `SequenceFeatureExtractor`, the only base
        imported above and otherwise unused.
    """

    # Consumed by the feature-extraction machinery.
    # NOTE(review): attribute name restored from the base-class contract.
    model_input_names = ["input_features", "is_longer"]

    def __init__(
        self,
        feature_size=64,
        sampling_rate=48000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 14000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        # HTK-scale filters (used for the "fusion" features).
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm=None,
            mel_scale="htk",
        )
        # Slaney-scale filters (used for the non-fusion features).
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def to_dict(self):
        """Serialize, dropping the large (recomputable) mel filter banks."""
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform, mel_filters=None):
        """Return the (frames, n_mels) dB log-mel spectrogram of `waveform`."""
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, "hann"),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel="dB",
        )
        return log_mel_spectrogram.T

    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        """Stack a bilinearly shrunk copy of `mel` with three random chunks."""
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion

    def _get_input_mel(self, waveform, max_length, truncation, padding):
        """Truncate/pad one waveform and return (mel features, is_longer)."""
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f'data_truncating {truncation} not implemented')
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
        return input_mel, longer

    def __call__(
        self,
        raw_speech,
        truncation: str = None,
        padding=None,
        max_length=None,
        sampling_rate=None,
        return_tensors=None,
        **kwargs,
    ):
        """Featurize one waveform or a batch of waveforms into a `BatchFeature`."""
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
                    f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
                    f' was sampled with {self.sampling_rate} and not {sampling_rate}.'
                )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.'
            )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        # NOTE(review): the scrambled dtype "np.floataa" is restored as
        # np.float64 -- confirm it was not float32 in the original.
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)
        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True
        if isinstance(input_mel[0], list):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {'input_features': input_mel, 'is_longer': is_longer}
        input_features = BatchFeature(input_features)
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)
        return input_features
| 719 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase__:
    """Builds tiny ViT configs and random inputs for the TF model tests.

    Fix: `__init__`'s parameters all shared the scrambled name
    ("UpperCAmelCase_"), a SyntaxError; names restored from the attribute
    assignments in the body.  Method names restored to the ones the test
    class below actually calls (`prepare_config_and_inputs`, `get_config`,
    `create_and_check_model`, `create_and_check_for_image_classification`,
    `prepare_config_and_inputs_for_common`).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for one test run."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class lowercase__(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common TF model tests for ViT.

    Fixes: the base mixins were scrambled to `_UpperCAmelCase` (both are
    imported above); the five class attributes were all bound to `A__` so only
    the last survived; every method was named `A_`, so unittest never
    discovered any test.  Names restored.
    NOTE(review): the flag-attribute and test-method names are restored from
    the tester-mixin conventions -- confirm against the test framework.
    """

    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = lowercase__(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='ViT does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='ViT does not use inputs_embeds')
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained('google/vit-base-patch16-224')
        self.assertIsNotNone(model)
def _lowercase ( ) -> Union[str, Any]:
    """Load the local COCO fixture image used by the integration test below."""
    # fix: bind the opened image to the name that is returned
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_tf
@require_vision
class lowercase__ ( unittest.TestCase ):
    """Slow integration test: TFViT image classification on the public checkpoint."""

    @cached_property
    def default_image_processor ( self ):
        # fix: this property was renamed away from `default_image_processor`,
        # breaking the `self.default_image_processor` access in the test below.
        return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None

    @slow
    def A_ ( self : str ):
        """Run a forward pass on the COCO fixture and pin the first three logits."""
        model = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224' )
        image_processor = self.default_image_processor
        # `_lowercase` is the fixture-image helper defined above in this file.
        image = _lowercase()
        inputs = image_processor(images=image , return_tensors='tf' )
        # forward pass
        outputs = model(**inputs )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-0.2_744, 0.8_215, -0.0_836] )
        tf.debugging.assert_near(outputs.logits[0, :3] , expected_slice , atol=1e-4 )
| 400 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
# Conversion-script logging: report progress at INFO level.
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
def create_rename_keys ( config , base_model=False ):
    """Build (old_key, new_key) pairs mapping timm ViT-hybrid weights to HF names.

    Fixes the obfuscated original, which had two parameters with the same name
    (a SyntaxError) and a def name that did not match its call site.

    config: ViTHybridConfig; `backbone_config.depths` and `num_hidden_layers`
        determine how many backbone / encoder entries are generated.
    base_model: when True, keys target the bare ViTHybridModel, so the leading
        "vit." prefix is stripped and the pooler/layernorm keys are added.
    Returns the list of (source_key, destination_key) tuples.
    """
    rename_keys = []
    # fmt: off
    # stem:
    rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token''') )
    rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings''') )
    rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight''') )
    rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias''') )
    # backbone
    rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight''') )
    rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight''') )
    rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias''') )
    for stage_idx in range(len(config.backbone_config.depths ) ):
        for layer_idx in range(config.backbone_config.depths[stage_idx] ):
            rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
            rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
            rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
            rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
            rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
            rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
            rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
            rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
            rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
        rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
        rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
        rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
    # transformer encoder
    for i in range(config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
        rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
        rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
        rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
        rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
        rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
        rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
        rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
        rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
        rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ('''norm.weight''', '''layernorm.weight'''),
                ('''norm.bias''', '''layernorm.bias'''),
                ('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
                ('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
            ] )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ('''norm.weight''', '''vit.layernorm.weight'''),
                ('''norm.bias''', '''vit.layernorm.bias'''),
                ('''head.weight''', '''classifier.weight'''),
                ('''head.bias''', '''classifier.bias'''),
            ] )
    # fmt: on
    return rename_keys
def read_in_q_k_v ( state_dict , config , base_model=False ):
    """Split timm's fused qkv projection into separate HF query/key/value entries.

    Fixes the obfuscated original, whose three parameters shared one name
    (a SyntaxError) and whose assignment targets were destroyed.

    state_dict: checkpoint dict, mutated in place.
    config: model config providing `num_hidden_layers` and `hidden_size`.
    base_model: when True, write keys without the leading "vit." prefix.
    """
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''''''
        else:
            prefix = '''vit.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_ ( state_dict ):
    """Drop the timm classification head from *state_dict*, in place.

    Missing keys are ignored (pop with a default), so the call is idempotent.
    """
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key ( dct , old , new ):
    """Move the value stored under *old* to *new* in dict *dct*, in place.

    Fixes the obfuscated original, whose three parameters shared one name
    (a SyntaxError in Python).
    """
    val = dct.pop(old )
    dct[new] = val
def prepare_img ( ):
    """Download the COCO cats image the conversion is verified on.

    Renamed to match its call site in convert_vit_checkpoint; the obfuscated
    def name was unreachable from there.
    """
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint ( vit_name , pytorch_dump_folder_path , push_to_hub=False ):
    """Convert a timm ViT-hybrid checkpoint into the HF ViTHybrid format.

    Fixes the obfuscated original (duplicate parameter names — a SyntaxError —
    and destroyed local names) by restoring the names its own body references.

    vit_name: timm model id to convert (e.g. 'vit_base_r50_s16_384').
    pytorch_dump_folder_path: where to save the converted model/processor, or None.
    push_to_hub: also upload model and processor to the hub when True.
    """
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=True , )
    config = ViTHybridConfig(backbone_config=backbone_config , image_size=384 , num_labels=1000 )
    base_model = False
    # load original model from timm
    timm_model = timm.create_model(vit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # label mapping for the classification head
    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config ).eval()
    else:
        model = ViTHybridForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # create image processor mirroring timm's preprocessing pipeline
    transform = create_transform(**resolve_data_config({} , model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        '''bilinear''': PILImageResampling.BILINEAR,
        '''bicubic''': PILImageResampling.BICUBIC,
        '''nearest''': PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image , return_tensors='''pt''' ).pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values )
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
    logits = outputs.logits
    print('''Predicted class:''' , logits.argmax(-1 ).item() )
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1e-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits , outputs.logits , atol=1e-3 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f'''Saving processor to {pytorch_dump_folder_path}''' )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f'''Pushing model and processor to the hub {vit_name}''' )
        model.push_to_hub(f'''ybelkada/{vit_name}''' )
        processor.push_to_hub(f'''ybelkada/{vit_name}''' )
if __name__ == "__main__":
    # CLI entry point: convert the named timm ViT-hybrid checkpoint.
    # fix: the parser/args were assigned to obfuscated names, leaving
    # `parser.add_argument` and `args.*` below referencing undefined names.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--vit_name',
        default='vit_base_r50_s16_384',
        type=str,
        help='Name of the hybrid ViT timm model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
    )
    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 348 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
# Maps model shorthand -> (config class, masked/causal LM class, tokenizer class).
# fix: this mapping was assigned to an obfuscated name, but main() looks it up
# as MODEL_CLASSES — restore the name the use site expects.
MODEL_CLASSES = {
    'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
    'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks ( args ):
    """Validate the distillation CLI arguments before any expensive setup.

    Fixes the obfuscated original: the body references `args`, but the
    parameter had been renamed, making every check a NameError.
    Raises AssertionError on any inconsistent combination.
    """
    # MLM and its loss weight must be enabled/disabled together.
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    # Exactly one of the MLM / CLM objectives must carry weight.
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts )
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    # Student/teacher pairing: same family, or distilbert distilled from bert.
    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config )
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights )

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    # All loss weights are non-negative and at least one is active.
    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings ( student , args ):
    """Freeze the student's positional embeddings during distillation.

    Fixes the obfuscated original (duplicate parameter names — a SyntaxError —
    and destroyed assignment targets): the only sensible effect here is
    switching the embedding weight's `requires_grad` off per student type.
    """
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings ( student , args ):
    """Freeze the student's token-type embeddings (RoBERTa students only).

    Fixes the obfuscated original (duplicate parameter names — a SyntaxError —
    and a destroyed assignment target).
    """
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main ( ):
    """Parse CLI arguments, build the student/teacher/dataset, and run distillation.

    Fixes the obfuscated original: the def name did not match the `main()`
    call in the __main__ guard, and every local (parser, args, tokenizer,
    student, teacher, ...) had been renamed away from its later references.
    """
    parser = argparse.ArgumentParser(description='''Training''' )
    parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
    parser.add_argument(
        '''--dump_path''' , type=str , required=True , help='''The output directory (log, checkpoints, parameters, etc.)''' )
    parser.add_argument(
        '''--data_file''' , type=str , required=True , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
    parser.add_argument(
        '''--student_type''' , type=str , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=True , help='''The student type (DistilBERT, RoBERTa).''' , )
    parser.add_argument('''--student_config''' , type=str , required=True , help='''Path to the student configuration.''' )
    parser.add_argument(
        '''--student_pretrained_weights''' , default=None , type=str , help='''Load student initialization checkpoint.''' )
    parser.add_argument(
        '''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=True , help='''Teacher type (BERT, RoBERTa).''' )
    parser.add_argument('''--teacher_name''' , type=str , required=True , help='''The teacher model.''' )
    parser.add_argument('''--temperature''' , default=2.0 , type=float , help='''Temperature for the softmax temperature.''' )
    parser.add_argument(
        '''--alpha_ce''' , default=0.5 , type=float , help='''Linear weight for the distillation loss. Must be >=0.''' )
    parser.add_argument(
        '''--alpha_mlm''' , default=0.0 , type=float , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
    parser.add_argument('''--alpha_clm''' , default=0.5 , type=float , help='''Linear weight for the CLM loss. Must be >=0.''' )
    parser.add_argument('''--alpha_mse''' , default=0.0 , type=float , help='''Linear weight of the MSE loss. Must be >=0.''' )
    parser.add_argument(
        '''--alpha_cos''' , default=0.0 , type=float , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
    parser.add_argument(
        '''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
    parser.add_argument(
        '''--mlm_mask_prop''' , default=0.15 , type=float , help='''Proportion of tokens for which we need to make a prediction.''' , )
    parser.add_argument('''--word_mask''' , default=0.8 , type=float , help='''Proportion of tokens to mask out.''' )
    parser.add_argument('''--word_keep''' , default=0.1 , type=float , help='''Proportion of tokens to keep.''' )
    parser.add_argument('''--word_rand''' , default=0.1 , type=float , help='''Proportion of tokens to randomly replace.''' )
    parser.add_argument(
        '''--mlm_smoothing''' , default=0.7 , type=float , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
    parser.add_argument('''--token_counts''' , type=str , help='''The token counts in the data_file for MLM.''' )
    parser.add_argument(
        '''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only the [MLM] prediction distribution.''' , )
    parser.add_argument(
        '''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
    parser.add_argument(
        '''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
    parser.add_argument('''--n_epoch''' , type=int , default=3 , help='''Number of pass on the whole dataset.''' )
    parser.add_argument('''--batch_size''' , type=int , default=5 , help='''Batch size (for each process).''' )
    parser.add_argument(
        '''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
    parser.add_argument(
        '''--gradient_accumulation_steps''' , type=int , default=50 , help='''Gradient accumulation for larger training batches.''' , )
    parser.add_argument('''--warmup_prop''' , default=0.05 , type=float , help='''Linear warmup proportion.''' )
    parser.add_argument('''--weight_decay''' , default=0.0 , type=float , help='''Weight decay if we apply some.''' )
    parser.add_argument('''--learning_rate''' , default=5e-4 , type=float , help='''The initial learning rate for Adam.''' )
    parser.add_argument('''--adam_epsilon''' , default=1e-6 , type=float , help='''Epsilon for Adam optimizer.''' )
    parser.add_argument('''--max_grad_norm''' , default=5.0 , type=float , help='''Max gradient norm.''' )
    parser.add_argument('''--initializer_range''' , default=0.02 , type=float , help='''Random initialization range.''' )
    parser.add_argument(
        '''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
    parser.add_argument(
        '''--fp16_opt_level''' , type=str , default='''O1''' , help=(
            '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
            '''See details at https://nvidia.github.io/apex/amp.html'''
        ) , )
    parser.add_argument('''--n_gpu''' , type=int , default=1 , help='''Number of GPUs in the node.''' )
    parser.add_argument('''--local_rank''' , type=int , default=-1 , help='''Distributed training - Local rank''' )
    parser.add_argument('''--seed''' , type=int , default=56 , help='''Random seed''' )
    parser.add_argument('''--log_interval''' , type=int , default=500 , help='''Tensorboard logging interval.''' )
    parser.add_argument('''--checkpoint_interval''' , type=int , default=4000 , help='''Checkpoint interval.''' )
    args = parser.parse_args()
    sanity_checks(args )

    # ARGS #
    init_gpu_params(args )
    set_seed(args )
    if args.is_master:
        if os.path.exists(args.dump_path ):
            if not args.force:
                raise ValueError(
                    f'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite'''
                    ''' itUse `--force` if you want to overwrite it''' )
            else:
                shutil.rmtree(args.dump_path )

        if not os.path.exists(args.dump_path ):
            os.makedirs(args.dump_path )
        logger.info(f'''Experiment will be dumped and logged in {args.dump_path}''' )

        # SAVE PARAMS #
        logger.info(f'''Param: {args}''' )
        with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
            json.dump(vars(args ) , f , indent=4 )
        git_log(args.dump_path )

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name )
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol )
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f'''Special tokens {special_tok_ids}''' )
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f'''Loading data from {args.data_file}''' )
    with open(args.data_file , '''rb''' ) as fp:
        data = pickle.load(fp )

    if args.mlm:
        logger.info(f'''Loading token counts from {args.token_counts} (already pre-computed)''' )
        with open(args.token_counts , '''rb''' ) as fp:
            counts = pickle.load(fp )

        token_probs = np.maximum(counts , 1 ) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs )
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args , data=data )
    logger.info('''Data loader created.''' )

    # STUDENT #
    logger.info(f'''Loading student config from {args.student_config}''' )
    stu_architecture_config = student_config_class.from_pretrained(args.student_config )
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f'''Loading pretrained weights from {args.student_pretrained_weights}''' )
        student = student_model_class.from_pretrained(args.student_pretrained_weights , config=stu_architecture_config )
    else:
        student = student_model_class(stu_architecture_config )

    if args.n_gpu > 0:
        student.to(f'''cuda:{args.local_rank}''' )
    logger.info('''Student loaded.''' )

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=True )
    if args.n_gpu > 0:
        teacher.to(f'''cuda:{args.local_rank}''' )
    logger.info(f'''Teacher loaded from {args.teacher_name}.''' )

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student , args )
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student , args )

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0 ) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args , dataset=train_lm_seq_dataset , token_probs=token_probs , student=student , teacher=teacher )
    distiller.train()
    logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
    # Script entry point: run the full distillation pipeline.
    main()
| 348 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __A ( unittest.TestCase ):
    """Checks for AudioDiffusionPipeline built from tiny randomly-seeded components.

    Fixes the obfuscated original: every member was named `_snake_case` (so later
    defs shadowed earlier ones) and locals were renamed away from their uses.
    Property names are restored from their `self.dummy_*` access sites below.
    NOTE(review): a second module-level class is also named `__A`; consider
    giving the two classes distinct names so both are collected by unittest.
    """

    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet( self ):
        """Tiny unconditional UNet sized for the 64x32 Mel spectrogram below."""
        torch.manual_seed(0 )
        model = UNetaDModel(
            sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , )
        return model

    @property
    def dummy_unet_condition( self ):
        """Tiny conditional UNet used by the encoding-conditioned test."""
        torch.manual_seed(0 )
        model = UNetaDConditionModel(
            sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , cross_attention_dim=10 , )
        return model

    @property
    def dummy_vqvae_and_unet( self ):
        """(VAE, UNet) pair for the latent audio-diffusion test."""
        torch.manual_seed(0 )
        vqvae = AutoencoderKL(
            sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , )
        unet = UNetaDModel(
            sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , )
        return vqvae, unet

    @slow
    def test_audio_diffusion( self ):
        """End-to-end pipeline checks with pixel-exact expected slices."""
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )

        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None , unet=self.dummy_unet , mel=mel , scheduler=scheduler )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )

        generator = torch.Generator(device=device ).manual_seed(42 )
        output = pipe(generator=generator , steps=4 )
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device ).manual_seed(42 )
        output = pipe(generator=generator , steps=4 , return_dict=False )
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes() , dtype="""uint8""" )[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0

        # Latent variant: VAE + UNet, denoising from a real raw-audio input.
        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )

        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=mel , scheduler=scheduler )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )

        np.random.seed(0 )
        raw_audio = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
        generator = torch.Generator(device=device ).manual_seed(42 )
        output = pipe(raw_audio=raw_audio , generator=generator , start_step=5 , steps=10 )
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0

        # Conditional variant: same VAE, cross-attention UNet fed a random encoding.
        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_unet_condition , mel=mel , scheduler=scheduler )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )

        np.random.seed(0 )
        encoding = torch.rand((1, 1, 10) )
        output = pipe(generator=generator , encoding=encoding )
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
    """Integration test against the public teticio/audio-diffusion-ddim-256 checkpoint.

    Fixes the obfuscated original: both members were named `_snake_case` and
    locals had been renamed away from their later references.
    """

    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion( self ):
        """Generate one sample and pin the shape and a pixel-exact slice."""
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("""teticio/audio-diffusion-ddim-256""" )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )

        generator = torch.Generator(device=device ).manual_seed(42 )
        output = pipe(generator=generator )
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 715 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def save_len_file(
    tokenizer_name , data_dir , max_source_length=1024 , max_target_length=1024 , consider_target=False , **kwargs ) -> Optional[Any]:
    """Precompute per-example sequence lengths for train/val and pickle them.

    Fixes the obfuscated original: the def name did not match the
    `fire.Fire(save_len_file)` entry point, and all six parameters shared one
    name (a SyntaxError).

    tokenizer_name: model id passed to AutoTokenizer.from_pretrained.
    data_dir: dataset directory handed to SeqaSeqDataset.
    max_source_length / max_target_length: truncation limits for the dataset.
    consider_target: when True, store max(source_len, target_len) per example;
        otherwise store the source length only.
    kwargs: forwarded to SeqaSeqDataset.
    """
    tok = AutoTokenizer.from_pretrained(tokenizer_name )
    train_ds = SeqaSeqDataset(tok , data_dir , max_source_length , max_target_length , type_path="""train""" , **kwargs )
    pad = tok.pad_token_id

    def get_lens(ds ):
        # Iterate in large batches; lengths are counted as non-pad tokens.
        dl = tqdm(
            DataLoader(ds , batch_size=512 , num_workers=8 , shuffle=False , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
        max_lens = []
        for batch in dl:
            src_lens = batch["""input_ids"""].ne(pad ).sum(1 ).tolist()
            tgt_lens = batch["""labels"""].ne(pad ).sum(1 ).tolist()
            if consider_target:
                for src, tgt in zip(src_lens , tgt_lens ):
                    max_lens.append(max(src , tgt ) )
            else:
                max_lens.extend(src_lens )
        return max_lens

    train_lens = get_lens(train_ds )
    val_ds = SeqaSeqDataset(tok , data_dir , max_source_length , max_target_length , type_path="""val""" , **kwargs )
    val_lens = get_lens(val_ds )
    pickle_save(train_lens , train_ds.len_file )
    pickle_save(val_lens , val_ds.len_file )
if __name__ == "__main__":
    # Fire exposes save_len_file's signature as a command-line interface.
    fire.Fire(save_len_file)
| 269 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.