| code (string, lengths 86-54.5k) | code_codestyle (int64, 0-371) | style_context (string, lengths 87-49.2k) | style_context_codestyle (int64, 0-349) | label (int64, 0-1) |
|---|---|---|---|---|
import socket


def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
| 11 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clap''': [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapAudioConfig''',
'''ClapConfig''',
'''ClapTextConfig''',
],
'''processing_clap''': ['''ClapProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapModel''',
'''ClapPreTrainedModel''',
'''ClapTextModel''',
'''ClapTextModelWithProjection''',
'''ClapAudioModel''',
'''ClapAudioModelWithProjection''',
]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 162 | 0 |
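The first `code` sample above is the client half of a simple file transfer: it assumes a server is already listening on the same hostname and port 12312. For context, a minimal companion server could look like the sketch below; the `payload.bin` filename and the single-connection flow are illustrative assumptions, not part of the dataset row.

import socket


def serve_file(filename: str = "payload.bin", port: int = 12312) -> None:
    # Listen on the same host/port the client above connects to.
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, addr = server.accept()
    print(f"Connection from {addr}")
    print(conn.recv(1024))  # consume the client's greeting
    with open(filename, "rb") as in_file:
        while chunk := in_file.read(1024):
            conn.sendall(chunk)
    # Closing the connection signals EOF, which ends the client's recv loop.
    conn.close()
    server.close()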
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples
    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )
    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
| 351 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
| 125 | 0 |
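Outside a test harness, the visual-question-answering pipeline exercised by the row above is driven the same way. A minimal usage sketch, reusing the public checkpoint the slow test loads; the local image path is a placeholder:

from transformers import pipeline

# Same checkpoint the slow test above exercises.
vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")

# `image` accepts a local path, URL, or PIL.Image; `top_k` bounds the answers returned.
preds = vqa(image="cats.png", question="How many cats are there?", top_k=2)
for pred in preds:
    print(f"{pred['answer']}: {pred['score']:.4f}")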
import unittest

from knapsack import knapsack as k


class TestKnapsack(unittest.TestCase):
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)


if __name__ == "__main__":
    unittest.main()
| 325 |
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def lowercase ( A_ )-> List[Any]:
'''simple docstring'''
monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() )
@pytest.fixture
def lowercase ( A_ )-> Tuple:
'''simple docstring'''
class _A :
"""simple docstring"""
def __init__( self : str , __UpperCAmelCase : int):
a : List[Any] = metric_id
class _A :
"""simple docstring"""
UpperCAmelCase : Union[str, Any] = [MetricMock(_a ) for metric_id in ["""accuracy""", """mse""", """precision""", """codeparrot/apps_metric"""]]
def __snake_case ( self : List[str]):
return self._metrics
monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() )
@pytest.mark.parametrize(
"func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def lowercase ( A_ , A_ , A_ , A_ , A_ )-> Any:
'''simple docstring'''
if "tmp_path" in args:
a : Union[str, Any] = tuple(arg if arg != "tmp_path" else tmp_path for arg in args )
with pytest.warns(A_ , match="https://huggingface.co/docs/evaluate" ):
func(*A_ )
| 40 | 0 |
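The knapsack tests in the row above import the implementation from a sibling `knapsack` module that is not part of the sample. A recursive 0/1-knapsack sketch consistent with the `knapsack(capacity, weights, values, counter)` call signature the assertions use; this is a reconstruction for illustration, not the module's verbatim code:

def knapsack(capacity: int, weights: list, values: list, counter: int) -> int:
    """Return the maximum value that fits in `capacity`, considering the first `counter` items."""
    if counter == 0 or capacity == 0:
        return 0
    # If the item's weight exceeds the remaining capacity, it cannot be included.
    if weights[counter - 1] > capacity:
        return knapsack(capacity, weights, values, counter - 1)
    # Otherwise, take the better of including or excluding the item.
    left_capacity = capacity - weights[counter - 1]
    with_item = values[counter - 1] + knapsack(left_capacity, weights, values, counter - 1)
    without_item = knapsack(capacity, weights, values, counter - 1)
    return max(with_item, without_item)


assert knapsack(50, [10, 20, 30], [60, 100, 120], 3) == 220  # matches the last test case above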
"""simple docstring"""
def A ( ) -> int:
return [
a * b * (1_0_0_0 - a - b)
for a in range(1 , 9_9_9 )
for b in range(snake_case , 9_9_9 )
if (a * a + b * b == (1_0_0_0 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 263 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def A ( snake_case :str , snake_case :str = "cpu" , snake_case :Union[str, None] = None ) -> None:
__UpperCamelCase = torch.load(snake_case , map_location=snake_case )
for k, v in tqdm(state_dict.items() ):
if not isinstance(snake_case , torch.Tensor ):
raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin' )
__UpperCamelCase = v.half()
if save_path is None: # overwrite src_path
__UpperCamelCase = src_path
torch.save(snake_case , snake_case )
if __name__ == "__main__":
fire.Fire(convert)
| 263 | 1 |
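The Project Euler sample in the row above returns the product of the unique Pythagorean triplet summing to 1000; the answer is easy to verify by hand with a = 200, b = 375, c = 425:

a, b, c = 200, 375, 425
assert a + b + c == 1000
assert a * a + b * b == c * c  # 40000 + 140625 == 180625
print(a * b * c)  # 31875000, the value solution() returns

The fp16 script in the same row is likewise run from the command line through `fire`, e.g. `python convert.py pytorch_model.bin --save_path model_fp16.bin` (file names illustrative).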
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
| 36 |
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """
    Descriptor that mimics @property but caches output in a member variable.
    """

    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def strtobool(val):
    """Convert a string representation of truth to 1 (true) or 0 (false)."""
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")
def A ( _lowerCamelCase ):
'''simple docstring'''
if is_torch_fx_proxy(_lowerCamelCase ):
return True
if is_torch_available():
import torch
if isinstance(_lowerCamelCase , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(_lowerCamelCase , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(_lowerCamelCase , (jnp.ndarray, Tracer) ):
return True
return isinstance(_lowerCamelCase , np.ndarray )
def A ( _lowerCamelCase ):
'''simple docstring'''
return isinstance(_lowerCamelCase , np.ndarray )
def A ( _lowerCamelCase ):
'''simple docstring'''
return _is_numpy(_lowerCamelCase )
def A ( _lowerCamelCase ):
'''simple docstring'''
import torch
return isinstance(_lowerCamelCase , torch.Tensor )
def A ( _lowerCamelCase ):
'''simple docstring'''
return False if not is_torch_available() else _is_torch(_lowerCamelCase )
def A ( _lowerCamelCase ):
'''simple docstring'''
import torch
return isinstance(_lowerCamelCase , torch.device )
def A ( _lowerCamelCase ):
'''simple docstring'''
return False if not is_torch_available() else _is_torch_device(_lowerCamelCase )
def A ( _lowerCamelCase ):
'''simple docstring'''
import torch
if isinstance(_lowerCamelCase , _lowerCamelCase ):
if hasattr(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = getattr(_lowerCamelCase , _lowerCamelCase )
else:
return False
return isinstance(_lowerCamelCase , torch.dtype )
def A ( _lowerCamelCase ):
'''simple docstring'''
return False if not is_torch_available() else _is_torch_dtype(_lowerCamelCase )
def A ( _lowerCamelCase ):
'''simple docstring'''
import tensorflow as tf
return isinstance(_lowerCamelCase , tf.Tensor )
def A ( _lowerCamelCase ):
'''simple docstring'''
return False if not is_tf_available() else _is_tensorflow(_lowerCamelCase )
def A ( _lowerCamelCase ):
'''simple docstring'''
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(_lowerCamelCase , "is_symbolic_tensor" ):
return tf.is_symbolic_tensor(_lowerCamelCase )
return type(_lowerCamelCase ) == tf.Tensor
def A ( _lowerCamelCase ):
'''simple docstring'''
return False if not is_tf_available() else _is_tf_symbolic_tensor(_lowerCamelCase )
def A ( _lowerCamelCase ):
'''simple docstring'''
import jax.numpy as jnp # noqa: F811
return isinstance(_lowerCamelCase , jnp.ndarray )
def A ( _lowerCamelCase ):
'''simple docstring'''
return False if not is_flax_available() else _is_jax(_lowerCamelCase )
def A ( _lowerCamelCase ):
'''simple docstring'''
if isinstance(_lowerCamelCase , (dict, UserDict) ):
return {k: to_py_obj(_lowerCamelCase ) for k, v in obj.items()}
elif isinstance(_lowerCamelCase , (list, tuple) ):
return [to_py_obj(_lowerCamelCase ) for o in obj]
elif is_tf_tensor(_lowerCamelCase ):
return obj.numpy().tolist()
elif is_torch_tensor(_lowerCamelCase ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(_lowerCamelCase ):
return np.asarray(_lowerCamelCase ).tolist()
elif isinstance(_lowerCamelCase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def A ( _lowerCamelCase ):
'''simple docstring'''
if isinstance(_lowerCamelCase , (dict, UserDict) ):
return {k: to_numpy(_lowerCamelCase ) for k, v in obj.items()}
elif isinstance(_lowerCamelCase , (list, tuple) ):
return np.array(_lowerCamelCase )
elif is_tf_tensor(_lowerCamelCase ):
return obj.numpy()
elif is_torch_tensor(_lowerCamelCase ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(_lowerCamelCase ):
return np.asarray(_lowerCamelCase )
else:
return obj
class UpperCAmelCase_ ( a):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = fields(self)
# Safety and consistency checks
if not len(__a):
raise ValueError(f"{self.__class__.__name__} has no fields.")
if not all(field.default is None for field in class_fields[1:]):
raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")
_lowerCAmelCase : Dict = getattr(self, class_fields[0].name)
_lowerCAmelCase : str = all(getattr(self, field.name) is None for field in class_fields[1:])
if other_fields_are_none and not is_tensor(__a):
if isinstance(__a, __a):
_lowerCAmelCase : Tuple = first_field.items()
_lowerCAmelCase : Dict = True
else:
try:
_lowerCAmelCase : Dict = iter(__a)
_lowerCAmelCase : Any = True
except TypeError:
_lowerCAmelCase : Any = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(__a):
if (
not isinstance(__a, (list, tuple))
or not len(__a) == 2
or not isinstance(element[0], __a)
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
_lowerCAmelCase : Any = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
f"Cannot set key/value for {element}. It needs to be a tuple (key, value).")
break
setattr(self, element[0], element[1])
if element[1] is not None:
_lowerCAmelCase : Any = element[1]
elif first_field is not None:
_lowerCAmelCase : Any = first_field
else:
for field in class_fields:
_lowerCAmelCase : Dict = getattr(self, field.name)
if v is not None:
_lowerCAmelCase : Union[str, Any] = v
def __delitem__( self, *__a, **__a):
'''simple docstring'''
raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")
def snake_case__ ( self, *__a, **__a):
'''simple docstring'''
raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")
def snake_case__ ( self, *__a, **__a):
'''simple docstring'''
raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")
def snake_case__ ( self, *__a, **__a):
'''simple docstring'''
raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")
def __getitem__( self, __a):
'''simple docstring'''
if isinstance(__a, __a):
_lowerCAmelCase : Optional[int] = dict(self.items())
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self, __a, __a):
'''simple docstring'''
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(__a, __a)
super().__setattr__(__a, __a)
def __setitem__( self, __a, __a):
'''simple docstring'''
super().__setitem__(__a, __a)
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(__a, __a)
def snake_case__ ( self):
'''simple docstring'''
return tuple(self[k] for k in self.keys())
class UpperCAmelCase_ ( a , a):
@classmethod
def snake_case__ ( cls, __a):
'''simple docstring'''
raise ValueError(
f"{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys())}")
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'longest'
lowerCamelCase__ = 'max_length'
lowerCamelCase__ = 'do_not_pad'
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'pt'
lowerCamelCase__ = 'tf'
lowerCamelCase__ = 'np'
lowerCamelCase__ = 'jax'
class UpperCAmelCase_ :
def __init__( self, __a):
'''simple docstring'''
_lowerCAmelCase : Tuple = context_managers
_lowerCAmelCase : Dict = ExitStack()
def __enter__( self):
'''simple docstring'''
for context_manager in self.context_managers:
self.stack.enter_context(__a)
def __exit__( self, *__a, **__a):
'''simple docstring'''
self.stack.__exit__(*__a, **__a)
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : str = infer_framework(_lowerCamelCase )
if framework == "tf":
_lowerCAmelCase : Tuple = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
_lowerCAmelCase : str = inspect.signature(model_class.forward ) # PyTorch models
else:
_lowerCAmelCase : Tuple = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : str = model_class.__name__
_lowerCAmelCase : Optional[Any] = infer_framework(_lowerCamelCase )
if framework == "tf":
_lowerCAmelCase : Dict = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
_lowerCAmelCase : List[Any] = inspect.signature(model_class.forward ) # PyTorch models
else:
_lowerCAmelCase : Dict = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def A ( _lowerCamelCase , _lowerCamelCase = "" , _lowerCamelCase = "." ):
'''simple docstring'''
def _flatten_dict(_lowerCamelCase , _lowerCamelCase="" , _lowerCamelCase="." ):
for k, v in d.items():
_lowerCAmelCase : Dict = str(_lowerCamelCase ) + delimiter + str(_lowerCamelCase ) if parent_key else k
if v and isinstance(_lowerCamelCase , _lowerCamelCase ):
yield from flatten_dict(_lowerCamelCase , _lowerCamelCase , delimiter=_lowerCamelCase ).items()
else:
yield key, v
return dict(_flatten_dict(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) )
@contextmanager
def A ( _lowerCamelCase , _lowerCamelCase = False ):
'''simple docstring'''
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def A ( _lowerCamelCase , _lowerCamelCase=None ):
'''simple docstring'''
if is_numpy_array(_lowerCamelCase ):
return np.transpose(_lowerCamelCase , axes=_lowerCamelCase )
elif is_torch_tensor(_lowerCamelCase ):
return array.T if axes is None else array.permute(*_lowerCamelCase )
elif is_tf_tensor(_lowerCamelCase ):
import tensorflow as tf
return tf.transpose(_lowerCamelCase , perm=_lowerCamelCase )
elif is_jax_tensor(_lowerCamelCase ):
return jnp.transpose(_lowerCamelCase , axes=_lowerCamelCase )
else:
raise ValueError(F"Type not supported for transpose: {type(_lowerCamelCase )}." )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if is_numpy_array(_lowerCamelCase ):
return np.reshape(_lowerCamelCase , _lowerCamelCase )
elif is_torch_tensor(_lowerCamelCase ):
return array.reshape(*_lowerCamelCase )
elif is_tf_tensor(_lowerCamelCase ):
import tensorflow as tf
return tf.reshape(_lowerCamelCase , _lowerCamelCase )
elif is_jax_tensor(_lowerCamelCase ):
return jnp.reshape(_lowerCamelCase , _lowerCamelCase )
else:
raise ValueError(F"Type not supported for reshape: {type(_lowerCamelCase )}." )
def A ( _lowerCamelCase , _lowerCamelCase=None ):
'''simple docstring'''
if is_numpy_array(_lowerCamelCase ):
return np.squeeze(_lowerCamelCase , axis=_lowerCamelCase )
elif is_torch_tensor(_lowerCamelCase ):
return array.squeeze() if axis is None else array.squeeze(dim=_lowerCamelCase )
elif is_tf_tensor(_lowerCamelCase ):
import tensorflow as tf
return tf.squeeze(_lowerCamelCase , axis=_lowerCamelCase )
elif is_jax_tensor(_lowerCamelCase ):
return jnp.squeeze(_lowerCamelCase , axis=_lowerCamelCase )
else:
raise ValueError(F"Type not supported for squeeze: {type(_lowerCamelCase )}." )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if is_numpy_array(_lowerCamelCase ):
return np.expand_dims(_lowerCamelCase , _lowerCamelCase )
elif is_torch_tensor(_lowerCamelCase ):
return array.unsqueeze(dim=_lowerCamelCase )
elif is_tf_tensor(_lowerCamelCase ):
import tensorflow as tf
return tf.expand_dims(_lowerCamelCase , axis=_lowerCamelCase )
elif is_jax_tensor(_lowerCamelCase ):
return jnp.expand_dims(_lowerCamelCase , axis=_lowerCamelCase )
else:
raise ValueError(F"Type not supported for expand_dims: {type(_lowerCamelCase )}." )
def A ( _lowerCamelCase ):
'''simple docstring'''
if is_numpy_array(_lowerCamelCase ):
return np.size(_lowerCamelCase )
elif is_torch_tensor(_lowerCamelCase ):
return array.numel()
elif is_tf_tensor(_lowerCamelCase ):
import tensorflow as tf
return tf.size(_lowerCamelCase )
elif is_jax_tensor(_lowerCamelCase ):
return array.size
else:
raise ValueError(F"Type not supported for expand_dims: {type(_lowerCamelCase )}." )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
for key, value in auto_map.items():
if isinstance(_lowerCamelCase , (tuple, list) ):
_lowerCAmelCase : List[Any] = [F"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
elif value is not None and "--" not in value:
_lowerCAmelCase : Tuple = F"{repo_id}--{value}"
return auto_map
def A ( _lowerCamelCase ):
'''simple docstring'''
for base_class in inspect.getmro(_lowerCamelCase ):
_lowerCAmelCase : Tuple = base_class.__module__
_lowerCAmelCase : int = base_class.__name__
if module.startswith("tensorflow" ) or module.startswith("keras" ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith("torch" ) or name == "PreTrainedModel":
return "pt"
elif module.startswith("flax" ) or module.startswith("jax" ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(F"Could not infer framework from class {model_class}." )
| 36 | 1 |
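The `style_context` cell above includes a `flatten_dict` helper that joins nested keys with a delimiter. A simplified, self-contained version of the same idea (not the library's exact code) shows the behavior:

def flatten_dict(d: dict, parent_key: str = "", delimiter: str = ".") -> dict:
    """Flatten a nested dict into a single level, joining keys with `delimiter`."""
    items = {}
    for k, v in d.items():
        key = parent_key + delimiter + str(k) if parent_key else str(k)
        if isinstance(v, dict) and v:
            items.update(flatten_dict(v, key, delimiter))
        else:
            items[key] = v
    return items


assert flatten_dict({"a": 1, "b": {"c": 2, "d": {"e": 3}}}) == {"a": 1, "b.c": 2, "b.d.e": 3}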
from transformers import BertTokenizerFast

from .custom_tokenization import CustomTokenizer


class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
| 198 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
UpperCAmelCase_ : List[Any] = [
"python",
"tqdm",
"regex",
"requests",
"packaging",
"filelock",
"numpy",
"tokenizers",
"huggingface-hub",
"safetensors",
"accelerate",
"pyyaml",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
| 198 | 1 |
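The runtime dependency check in the row above compares installed package versions against the pinned specs in `deps`. A simplified sketch of what such a `require_version`-style check does, built only on `importlib.metadata` and the `packaging` library; it handles single-spec requirements only, and the error wording is an assumption:

import re
from importlib.metadata import version

from packaging.version import parse


def require_version(requirement: str) -> None:
    """Check that an installed package satisfies a spec like 'tqdm>=4.27'."""
    match = re.fullmatch(r"([A-Za-z0-9_.-]+)(==|>=|<=|>|<)(.+)", requirement)
    if match is None:
        raise ValueError(f"unparsable requirement: {requirement}")
    pkg, op, wanted_str = match.groups()
    got = parse(version(pkg))  # raises PackageNotFoundError if pkg is not installed
    wanted = parse(wanted_str)
    ok = {
        "==": got == wanted,
        ">=": got >= wanted,
        "<=": got <= wanted,
        ">": got > wanted,
        "<": got < wanted,
    }[op]
    if not ok:
        raise ImportError(f"{pkg}{op}{wanted} is required, but found {pkg}=={got}")


require_version("tqdm>=4.27")  # raises ImportError on a stale install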
from statistics import mean, stdev


def normalization(data: list, ndigits: int = 3) -> list:
    """Rescale `data` to the range [0, 1] (min-max normalization)."""
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    """Rescale `data` to zero mean and unit sample standard deviation (z-score)."""
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / sigma, ndigits) for x in data]
| 43 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype("""|b1"""),
np.dtype("""|u1"""),
np.dtype("""<u2"""),
np.dtype(""">u2"""),
np.dtype("""<i2"""),
np.dtype(""">i2"""),
np.dtype("""<u4"""),
np.dtype(""">u4"""),
np.dtype("""<i4"""),
np.dtype(""">i4"""),
np.dtype("""<f4"""),
np.dtype(""">f4"""),
np.dtype("""<f8"""),
np.dtype(""">f8"""),
]
@dataclass
class _lowerCamelCase :
"""simple docstring"""
snake_case = True
snake_case = None
# Automatically constructed
snake_case = "PIL.Image.Image"
snake_case = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
snake_case = field(default="Image" , init=UpperCamelCase , repr=UpperCamelCase )
def __call__( self )->int:
'''simple docstring'''
return self.pa_type
def _snake_case ( self , _SCREAMING_SNAKE_CASE )->dict:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
A_ : Optional[int] = np.array(_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return {"path": value, "bytes": None}
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return {"path": None, "bytes": value}
elif isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(_SCREAMING_SNAKE_CASE )
elif isinstance(_SCREAMING_SNAKE_CASE , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(_SCREAMING_SNAKE_CASE )
elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get('''path''' )}
elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
else:
raise ValueError(
F'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None )->"PIL.Image.Image":
'''simple docstring'''
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Image(decode=True) instead.''' )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support decoding images, please install \'Pillow\'.''' )
if token_per_repo_id is None:
A_ : List[str] = {}
A_ , A_ : str = value['''path'''], value['''bytes''']
if bytes_ is None:
if path is None:
raise ValueError(F'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
else:
if is_local_path(_SCREAMING_SNAKE_CASE ):
A_ : List[str] = PIL.Image.open(_SCREAMING_SNAKE_CASE )
else:
A_ : List[str] = path.split('''::''' )[-1]
try:
A_ : int = string_to_dict(_SCREAMING_SNAKE_CASE , config.HUB_DATASETS_URL )['''repo_id''']
A_ : Optional[int] = token_per_repo_id.get(_SCREAMING_SNAKE_CASE )
except ValueError:
A_ : Any = None
with xopen(_SCREAMING_SNAKE_CASE , '''rb''' , use_auth_token=_SCREAMING_SNAKE_CASE ) as f:
A_ : Optional[Any] = BytesIO(f.read() )
A_ : Dict = PIL.Image.open(bytes_ )
else:
A_ : Optional[int] = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def _snake_case ( self )->Union["FeatureType", Dict[str, "FeatureType"]]:
'''simple docstring'''
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('''binary''' ),
"path": Value('''string''' ),
}
)
def _snake_case ( self , _SCREAMING_SNAKE_CASE )->pa.StructArray:
'''simple docstring'''
if pa.types.is_string(storage.type ):
A_ : Dict = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) , type=pa.binary() )
A_ : List[Any] = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
A_ : Dict = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) , type=pa.string() )
A_ : List[str] = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('''bytes''' ) >= 0:
A_ : Tuple = storage.field('''bytes''' )
else:
A_ : Optional[int] = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) , type=pa.binary() )
if storage.type.get_field_index('''path''' ) >= 0:
A_ : Optional[Any] = storage.field('''path''' )
else:
A_ : Optional[int] = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) , type=pa.string() )
A_ : Union[str, Any] = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
A_ : Optional[Any] = pa.array(
[encode_np_array(np.array(_SCREAMING_SNAKE_CASE ) )['''bytes'''] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
A_ : str = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) , type=pa.string() )
A_ : List[Any] = pa.StructArray.from_arrays(
[bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
return array_cast(_SCREAMING_SNAKE_CASE , self.pa_type )
def _snake_case ( self , _SCREAMING_SNAKE_CASE )->pa.StructArray:
'''simple docstring'''
@no_op_if_value_is_null
def path_to_bytes(_SCREAMING_SNAKE_CASE ):
with xopen(_SCREAMING_SNAKE_CASE , '''rb''' ) as f:
A_ : Any = f.read()
return bytes_
A_ : Dict = pa.array(
[
(path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
A_ : List[Any] = pa.array(
[os.path.basename(_SCREAMING_SNAKE_CASE ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , )
A_ : str = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
return array_cast(_SCREAMING_SNAKE_CASE , self.pa_type )
def list_image_compression_formats() -> list:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image to bytes, using the image's native format if it is a known compression format, else PNG/TIFF."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()


def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
A_ : Union[str, Any] = array.dtype
A_ : Dict = dtype.byteorder if dtype.byteorder != '''=''' else _NATIVE_BYTEORDER
A_ : Any = dtype.kind
A_ : Any = dtype.itemsize
A_ : Dict = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
A_ : List[Any] = np.dtype('''|u1''' )
if dtype_kind not in ["u", "i"]:
raise TypeError(
f'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
if dtype is not dest_dtype:
warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
A_ : int = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
A_ : Any = dtype_byteorder + dtype_kind + str(SCREAMING_SNAKE_CASE )
A_ : Optional[int] = np.dtype(SCREAMING_SNAKE_CASE )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
f'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
A_ : Tuple = PIL.Image.fromarray(array.astype(SCREAMING_SNAKE_CASE ) )
return {"path": None, "bytes": image_to_bytes(SCREAMING_SNAKE_CASE )}
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if objs:
A_ , A_ : Union[str, Any] = first_non_null_value(SCREAMING_SNAKE_CASE )
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(SCREAMING_SNAKE_CASE , np.ndarray ):
A_ : Tuple = no_op_if_value_is_null(SCREAMING_SNAKE_CASE )
return [obj_to_image_dict_func(SCREAMING_SNAKE_CASE ) for obj in objs]
elif isinstance(SCREAMING_SNAKE_CASE , PIL.Image.Image ):
A_ : List[str] = no_op_if_value_is_null(SCREAMING_SNAKE_CASE )
return [obj_to_image_dict_func(SCREAMING_SNAKE_CASE ) for obj in objs]
else:
return objs
else:
return objs
| 186 | 0 |
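The two rescaling helpers at the top of the row above are easiest to sanity-check on a tiny series. With `data = [2, 4, 6]` the mean is 4 and the sample standard deviation is exactly 2, so both transforms give round numbers:

data = [2, 4, 6]

# min-max normalization maps the range [2, 6] onto [0, 1]
assert normalization(data) == [0.0, 0.5, 1.0]

# standardization: mean = 4, sample stdev = sqrt(((2-4)**2 + 0 + (6-4)**2) / 2) = 2
assert standardization(data) == [-1.0, 0.0, 1.0]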
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """
        Args:
            short_edge_length (list[min, max])
            max_size (int): maximum allowed longest edge length.
        """
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length
    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs
class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std
    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]

        return torch.stack(images), torch.tensor(image_sizes)
    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> str:
assert torch.isfinite(lowerCAmelCase__ ).all(), "Box tensor contains infinite or NaN!"
lowercase__ : Optional[Any] = box_size
tensor[:, 0].clamp_(min=0 , max=lowerCAmelCase__ )
tensor[:, 1].clamp_(min=0 , max=lowerCAmelCase__ )
tensor[:, 2].clamp_(min=0 , max=lowerCAmelCase__ )
tensor[:, 3].clamp_(min=0 , max=lowerCAmelCase__ )
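# Usage sketch (not part of the original file): `cfg` is assumed to be the
# frcnn-style Config this research example loads, and the image path is illustrative.
#
#   preprocess = Preprocess(cfg)
#   images, sizes, scales_yx = preprocess("demo.jpg", single_image=True)
#   boxes = _scale_box(pred_boxes, scales_yx[None])  # map boxes back to raw-image scale
#   _clip_box(boxes, (raw_height, raw_width))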
| 352 |
"""simple docstring"""
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class __A ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : int ) -> str:
"""simple docstring"""
lowercase__ : List[Any] = '''hf-internal-testing/tiny-random-t5'''
lowercase__ : List[Any] = AutoTokenizer.from_pretrained(_snake_case )
lowercase__ : int = AutoModelForSeqaSeqLM.from_pretrained(_snake_case )
lowercase__ : str = tokenizer('''This is me''' ,return_tensors='''pt''' )
lowercase__ : Tuple = model.to_bettertransformer()
self.assertTrue(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
lowercase__ : Optional[int] = model.generate(**_snake_case )
lowercase__ : List[Any] = model.reverse_bettertransformer()
self.assertFalse(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_snake_case )
lowercase__ : Tuple = AutoModelForSeqaSeqLM.from_pretrained(_snake_case )
self.assertFalse(
any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
lowercase__ : int = model_reloaded.generate(**_snake_case )
self.assertTrue(torch.allclose(_snake_case ,_snake_case ) )
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : List[str] = '''hf-internal-testing/tiny-random-t5'''
lowercase__ : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(_snake_case )
lowercase__ : Union[str, Any] = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(_snake_case ):
model.save_pretrained(_snake_case )
lowercase__ : int = model.reverse_bettertransformer()
model.save_pretrained(_snake_case )
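# Outside the test harness the same round trip looks like this. A sketch only:
# it assumes `optimum` is installed, and the save path is illustrative.
#
#   from transformers import AutoModelForSeq2SeqLM
#
#   model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
#   model = model.to_bettertransformer()       # swap in BetterTransformer fused layers
#   model = model.reverse_bettertransformer()  # restore vanilla modules before saving
#   model.save_pretrained("./t5-roundtrip")    # saving while converted raises ValueError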
| 302 | 0 |
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 30 |
def perfect_cube(n: int) -> bool:
    """Return True if n is a perfect cube; the cube root is rounded first to avoid float error."""
    val = round(n ** (1 / 3))
    return (val * val * val) == n


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
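# Why the round() matters: the naive float comparison fails on exact cubes.
#   27 ** (1 / 3)              -> 3.0000000000000004 on IEEE-754 doubles
#   (27 ** (1 / 3)) ** 3 == 27 -> False, hence rounding the root before cubing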
| 63 | 0 |
def is_unique(input_str: str = "") -> bool:
    """
    Check whether every character in ``input_str`` appears at most once,
    tracking already-seen code points in an integer bitmap.

    >>> is_unique("python")
    True
    >>> is_unique("aloha")
    False
    """
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
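# For comparison, a set-based sketch of the same check; the bitmap version above
# avoids the set allocation but builds an arbitrarily large int for high code points.
def is_unique_via_set(input_str: str) -> bool:
    return len(set(input_str)) == len(input_str)


assert is_unique_via_set("python") and not is_unique_via_set("aloha")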
| 127 |
import os

# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution():
    """
    Count the triangle words in words.txt: a word whose letter values
    (A=1, ..., Z=26) sum to a triangular number.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    word_values = [sum(ord(x) - 64 for x in word) for word in words]
    triangle_words = [value for value in word_values if value in TRIANGULAR_NUMBERS]
    return len(triangle_words)


if __name__ == "__main__":
    print(solution())
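# Worked instance of the scoring rule (Project Euler 42): "SKY" has value
# 19 + 11 + 25 = 55, which is the tenth triangular number t(10) = 10 * 11 / 2.
assert sum(ord(x) - 64 for x in "SKY") == 55
assert 55 in TRIANGULAR_NUMBERS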
| 127 | 1 |
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        # Pad the bottom and right edges so both dimensions become multiples of `size`.
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
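# Hedged usage sketch (the Swin2SR class name is inferred from the
# pad-to-multiple-of-8 behaviour): a 250x300 image is padded symmetrically
# up to the next multiples of pad_size on each axis.
#
#   from PIL import Image
#
#   processor = Swin2SRImageProcessor(pad_size=8)
#   batch = processor(images=Image.new("RGB", (300, 250)), return_tensors="np")
#   print(batch["pixel_values"].shape)  # (1, 3, 256, 304)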
| 34 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_A : str = StableDiffusionSAGPipeline
_A : Optional[Any] = TEXT_TO_IMAGE_PARAMS
_A : Any = TEXT_TO_IMAGE_BATCH_PARAMS
_A : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS
_A : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS
_A : List[str] = False
def lowerCAmelCase_ ( self: Optional[Any] ) -> str:
torch.manual_seed(0 )
snake_case_ :Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
snake_case_ :Any = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=snake_case , set_alpha_to_one=snake_case , )
torch.manual_seed(0 )
snake_case_ :Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
snake_case_ :Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
snake_case_ :Tuple = CLIPTextModel(snake_case )
snake_case_ :str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
snake_case_ :Dict = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCAmelCase_ ( self: List[str] , snake_case: Tuple , snake_case: List[str]=0 ) -> str:
if str(snake_case ).startswith("""mps""" ):
snake_case_ :Tuple = torch.manual_seed(snake_case )
else:
snake_case_ :Optional[int] = torch.Generator(device=snake_case ).manual_seed(snake_case )
snake_case_ :Any = {
"""prompt""": """.""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 1.0,
"""sag_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase_ ( self: Optional[int] ) -> str:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self: int ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self: int ) -> List[str]:
snake_case_ :Any = StableDiffusionSAGPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" )
snake_case_ :int = sag_pipe.to(snake_case )
sag_pipe.set_progress_bar_config(disable=snake_case )
snake_case_ :Union[str, Any] = """."""
snake_case_ :str = torch.manual_seed(0 )
snake_case_ :str = sag_pipe(
[prompt] , generator=snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" )
snake_case_ :List[Any] = output.images
snake_case_ :Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
snake_case_ :List[Any] = np.array([0.1_5_6_8, 0.1_7_3_8, 0.1_6_9_5, 0.1_6_9_3, 0.1_5_0_7, 0.1_7_0_5, 0.1_5_4_7, 0.1_7_5_1, 0.1_9_4_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def lowerCAmelCase_ ( self: Dict ) -> str:
snake_case_ :Tuple = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
snake_case_ :Optional[int] = sag_pipe.to(snake_case )
sag_pipe.set_progress_bar_config(disable=snake_case )
snake_case_ :Tuple = """."""
snake_case_ :Union[str, Any] = torch.manual_seed(0 )
snake_case_ :Tuple = sag_pipe(
[prompt] , generator=snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" )
snake_case_ :Optional[int] = output.images
snake_case_ :Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
snake_case_ :Tuple = np.array([0.3_4_5_9, 0.2_8_7_6, 0.2_5_3_7, 0.3_0_0_2, 0.2_6_7_1, 0.2_1_6_0, 0.3_0_2_6, 0.2_2_6_2, 0.2_3_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def lowerCAmelCase_ ( self: List[str] ) -> List[str]:
snake_case_ :Optional[int] = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
snake_case_ :int = sag_pipe.to(snake_case )
sag_pipe.set_progress_bar_config(disable=snake_case )
snake_case_ :Tuple = """."""
snake_case_ :Optional[int] = torch.manual_seed(0 )
snake_case_ :List[str] = sag_pipe(
[prompt] , width=768 , height=512 , generator=snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" , )
snake_case_ :Optional[Any] = output.images
assert image.shape == (1, 512, 768, 3)
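# Sketch of running the pipeline under test outside the suite (downloads the
# checkpoint and needs a CUDA device; sag_scale values around 0.75 are typical).
#
#   import torch
#   from diffusers import StableDiffusionSAGPipeline
#
#   pipe = StableDiffusionSAGPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
#   ).to("cuda")
#   image = pipe("a photo of an astronaut", sag_scale=0.75).images[0]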
| 66 | 0 |
import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Export a PyTorch BertModel state dict as a TF1-style checkpoint under ckpt_dir."""
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
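# Example invocation, assuming the script is saved under its usual name
# (paths are illustrative):
#
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_ckpt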
| 369 |
import shutil
import tempfile
import unittest

from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio

from .test_feature_extraction_clap import floats_list


@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
| 284 | 0 |
"""simple docstring"""
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
_lowerCamelCase : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
def lowercase_ ( _UpperCAmelCase ):
"""simple docstring"""
warnings.warn(
'''The preprocess method is deprecated and will be removed in a future version. Please'''
''' use VaeImageProcessor.preprocess instead''' , _UpperCAmelCase , )
if isinstance(_UpperCAmelCase , torch.Tensor ):
return image
elif isinstance(_UpperCAmelCase , PIL.Image.Image ):
A_ : Optional[Any] = [image]
if isinstance(image[0] , PIL.Image.Image ):
A_ , A_ : List[Any] = image[0].size
A_ , A_ : Optional[int] = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
A_ : Dict = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
A_ : List[Any] = np.concatenate(_UpperCAmelCase , axis=0 )
A_ : Tuple = np.array(_UpperCAmelCase ).astype(np.floataa ) / 255.0
A_ : Tuple = image.transpose(0 , 3 , 1 , 2 )
A_ : int = 2.0 * image - 1.0
A_ : Optional[int] = torch.from_numpy(_UpperCAmelCase )
elif isinstance(image[0] , torch.Tensor ):
A_ : List[Any] = torch.cat(_UpperCAmelCase , dim=0 )
return image
def lowercase_ ( _UpperCAmelCase ):
"""simple docstring"""
if isinstance(_UpperCAmelCase , torch.Tensor ):
return mask
elif isinstance(_UpperCAmelCase , PIL.Image.Image ):
A_ : Dict = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
A_ , A_ : Union[str, Any] = mask[0].size
A_ , A_ : List[Any] = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
A_ : Union[str, Any] = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
A_ : int = np.concatenate(_UpperCAmelCase , axis=0 )
A_ : Optional[int] = mask.astype(np.floataa ) / 255.0
A_ : Union[str, Any] = 0
A_ : Union[str, Any] = 1
A_ : List[Any] = torch.from_numpy(_UpperCAmelCase )
elif isinstance(mask[0] , torch.Tensor ):
A_ : Union[str, Any] = torch.cat(_UpperCAmelCase , dim=0 )
return mask
class lowercase ( __UpperCAmelCase):
__lowerCAmelCase : UNetaDModel
__lowerCAmelCase : RePaintScheduler
def __init__( self : List[str] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple ):
"""simple docstring"""
super().__init__()
self.register_modules(unet=_lowerCamelCase , scheduler=_lowerCamelCase )
@torch.no_grad()
def __call__( self : str , _lowerCamelCase : Union[torch.Tensor, PIL.Image.Image] , _lowerCamelCase : Union[torch.Tensor, PIL.Image.Image] , _lowerCamelCase : int = 2_50 , _lowerCamelCase : float = 0.0 , _lowerCamelCase : int = 10 , _lowerCamelCase : int = 10 , _lowerCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowerCamelCase : Optional[str] = "pil" , _lowerCamelCase : bool = True , ):
"""simple docstring"""
A_ : Any = image
A_ : Dict = _preprocess_image(_lowerCamelCase )
A_ : Dict = original_image.to(device=self.device , dtype=self.unet.dtype )
A_ : Tuple = _preprocess_mask(_lowerCamelCase )
A_ : Optional[Any] = mask_image.to(device=self.device , dtype=self.unet.dtype )
A_ : Union[str, Any] = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(_lowerCamelCase , _lowerCamelCase ) and len(_lowerCamelCase ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(_lowerCamelCase )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
A_ : Union[str, Any] = original_image.shape
A_ : Union[str, Any] = randn_tensor(_lowerCamelCase , generator=_lowerCamelCase , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , self.device )
A_ : int = eta
A_ : int = self.scheduler.timesteps[0] + 1
A_ : List[Any] = generator[0] if isinstance(_lowerCamelCase , _lowerCamelCase ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
A_ : Union[str, Any] = self.unet(_lowerCamelCase , _lowerCamelCase ).sample
# compute previous image: x_t -> x_t-1
A_ : Any = self.scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
A_ : Tuple = self.scheduler.undo_step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
A_ : List[Any] = t
A_ : str = (image / 2 + 0.5).clamp(0 , 1 )
A_ : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
A_ : Union[str, Any] = self.numpy_to_pil(_lowerCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowerCamelCase )
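# Hedged usage sketch: mask values of 1 keep the original pixels and 0 marks the
# region to repaint. Checkpoint wiring follows the official RePaint example.
#
#   from diffusers import RePaintPipeline, RePaintScheduler
#
#   scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
#   pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
#   result = pipe(image=init_image, mask_image=mask, num_inference_steps=250,
#                 jump_length=10, jump_n_sample=10).images[0]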
| 167 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCamelCase : Optional[Any] = {
'microsoft/layoutlmv3-base': 'https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json',
}
class lowercase ( __UpperCAmelCase):
__lowerCAmelCase : List[Any] = """layoutlmv3"""
def __init__( self : Optional[int] , _lowerCamelCase : str=5_02_65 , _lowerCamelCase : Any=7_68 , _lowerCamelCase : int=12 , _lowerCamelCase : str=12 , _lowerCamelCase : int=30_72 , _lowerCamelCase : List[Any]="gelu" , _lowerCamelCase : Tuple=0.1 , _lowerCamelCase : str=0.1 , _lowerCamelCase : Any=5_12 , _lowerCamelCase : Tuple=2 , _lowerCamelCase : Dict=0.02 , _lowerCamelCase : Optional[Any]=1E-5 , _lowerCamelCase : Union[str, Any]=1 , _lowerCamelCase : Any=0 , _lowerCamelCase : int=2 , _lowerCamelCase : Union[str, Any]=10_24 , _lowerCamelCase : Dict=1_28 , _lowerCamelCase : int=1_28 , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : int=32 , _lowerCamelCase : int=1_28 , _lowerCamelCase : Tuple=64 , _lowerCamelCase : List[Any]=2_56 , _lowerCamelCase : List[Any]=True , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : List[str]=True , _lowerCamelCase : Tuple=2_24 , _lowerCamelCase : List[Any]=3 , _lowerCamelCase : Dict=16 , _lowerCamelCase : Any=None , **_lowerCamelCase : List[str] , ):
"""simple docstring"""
super().__init__(
vocab_size=_lowerCamelCase , hidden_size=_lowerCamelCase , num_hidden_layers=_lowerCamelCase , num_attention_heads=_lowerCamelCase , intermediate_size=_lowerCamelCase , hidden_act=_lowerCamelCase , hidden_dropout_prob=_lowerCamelCase , attention_probs_dropout_prob=_lowerCamelCase , max_position_embeddings=_lowerCamelCase , type_vocab_size=_lowerCamelCase , initializer_range=_lowerCamelCase , layer_norm_eps=_lowerCamelCase , pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase , )
A_ : List[Any] = max_ad_position_embeddings
A_ : List[str] = coordinate_size
A_ : Tuple = shape_size
A_ : Optional[Any] = has_relative_attention_bias
A_ : Any = rel_pos_bins
A_ : str = max_rel_pos
A_ : Optional[int] = has_spatial_attention_bias
A_ : int = rel_ad_pos_bins
A_ : Tuple = max_rel_ad_pos
A_ : int = text_embed
A_ : List[Any] = visual_embed
A_ : str = input_size
A_ : Dict = num_channels
A_ : Optional[int] = patch_size
A_ : Dict = classifier_dropout
class lowercase ( __UpperCAmelCase):
__lowerCAmelCase : Optional[Any] = version.parse("""1.12""")
@property
def a_ ( self : Tuple ):
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
@property
def a_ ( self : int ):
"""simple docstring"""
return 1E-5
@property
def a_ ( self : Optional[int] ):
"""simple docstring"""
return 12
def a_ ( self : Optional[int] , _lowerCamelCase : "ProcessorMixin" , _lowerCamelCase : int = -1 , _lowerCamelCase : int = -1 , _lowerCamelCase : bool = False , _lowerCamelCase : Optional["TensorType"] = None , _lowerCamelCase : int = 3 , _lowerCamelCase : int = 40 , _lowerCamelCase : int = 40 , ):
"""simple docstring"""
setattr(processor.image_processor , '''apply_ocr''' , _lowerCamelCase )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A_ : Tuple = compute_effective_axis_dimension(
_lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A_ : Tuple = processor.tokenizer.num_special_tokens_to_add(_lowerCamelCase )
A_ : List[Any] = compute_effective_axis_dimension(
_lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowerCamelCase )
# Generate dummy inputs according to compute batch and sequence
A_ : int = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
A_ : Optional[int] = [[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
A_ : str = self._generate_dummy_images(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
A_ : Union[str, Any] = dict(
processor(
_lowerCamelCase , text=_lowerCamelCase , boxes=_lowerCamelCase , return_tensors=_lowerCamelCase , ) )
return inputs
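# Rough sketch of how an ONNX config like this is typically driven; the
# constructor and processor wiring below are assumptions based on the generic
# OnnxConfig API, not taken from this file.
#
#   from transformers import AutoProcessor
#   from transformers.utils import TensorType
#
#   onnx_config = LayoutLMv3OnnxConfig(LayoutLMv3Config(), task="question-answering")
#   processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
#   dummy_inputs = onnx_config.generate_dummy_inputs(processor, framework=TensorType.PYTORCH)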
| 167 | 1 |
import os
import warnings
from typing import List, Optional

from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig


logger = logging.get_logger(__name__)


class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)

        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: str = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
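# Brief usage sketch: questions are tokenized with the question-encoder side by
# default, and decoding goes through the generator side (checkpoint from the
# public RAG model cards; `generated_ids` is assumed to come from RAG's generate()).
#
#   from transformers import RagTokenizer
#
#   tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#   inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")
#   answers = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)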
| 199 |
import gc
import random
import unittest

import numpy as np
import torch
from transformers import (
    CLIPImageProcessor,
    CLIPTextConfig,
    CLIPTextModel,
    CLIPTokenizer,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
)

from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    floats_tensor,
    load_image,
    load_numpy,
    require_torch_gpu,
    skip_mps,
    slow,
    torch_device,
)

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
    assert_mean_pixel_difference,
)


enable_full_determinism()


class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components

        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                num_hidden_layers=5,
                num_attention_heads=4,
                image_size=32,
                intermediate_size=37,
                patch_size=1,
            )
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }

        return components

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }

    @skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)


@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            input_image,
            "anime turtle",
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 199 | 1 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union

import fsspec

from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class JsonDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(
        self,
        file_obj: BinaryIO,
        orient,
        lines,
        index,
        **to_json_kwargs,
    ) -> int:
        """Writes the pyarrow table as JSON lines to a binary file handle; the caller opens and closes the handle."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
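# These classes back `load_dataset("json", ...)` and `Dataset.to_json`; a small
# round-trip sketch (file name illustrative):
#
#   from datasets import Dataset, load_dataset
#
#   Dataset.from_dict({"a": [1, 2, 3]}).to_json("out.jsonl", num_proc=2)
#   ds = load_dataset("json", data_files="out.jsonl", split="train")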
| 92 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __magic_name__ ( lowercase=None ):
if subparsers is not None:
SCREAMING_SNAKE_CASE_: Dict =subparsers.add_parser("""test""" )
else:
SCREAMING_SNAKE_CASE_: Optional[Any] =argparse.ArgumentParser("""Accelerate test command""" )
parser.add_argument(
"""--config_file""" , default=lowercase , help=(
"""The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
"""location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
"""such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
"""with 'huggingface'."""
) , )
if subparsers is not None:
parser.set_defaults(func=lowercase )
return parser
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Dict =os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["""test_utils""", """scripts""", """test_script.py"""] )
if args.config_file is None:
SCREAMING_SNAKE_CASE_: Any =script_name
else:
SCREAMING_SNAKE_CASE_: int =f'''--config_file={args.config_file} {script_name}'''
SCREAMING_SNAKE_CASE_: Any =["""accelerate-launch"""] + test_args.split()
SCREAMING_SNAKE_CASE_: Dict =execute_subprocess_async(lowercase , env=os.environ.copy() )
if result.returncode == 0:
print("""Test is a success! You are ready for your distributed training!""" )
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: Optional[Any] =test_command_parser()
SCREAMING_SNAKE_CASE_: List[str] =parser.parse_args()
test_command(lowercase )
if __name__ == "__main__":
main()
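# Wired into the accelerate CLI, the parser above is exercised as:
#
#   accelerate test --config_file path/to/default_config.yaml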
| 173 | 0 |
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = (DDIMParallelScheduler,)
SCREAMING_SNAKE_CASE = (('eta', 0.0), ('num_inference_steps', 5_0))
def _a (self , **_lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = {
"""num_train_timesteps""": 1000,
"""beta_start""": 0.0_001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""clip_sample""": True,
}
config.update(**_lowerCamelCase )
return config
def _a (self , **_lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Any = self.scheduler_classes[0]
UpperCAmelCase__ : List[str] = self.get_scheduler_config(**_lowerCamelCase )
UpperCAmelCase__ : Tuple = scheduler_class(**_lowerCamelCase )
UpperCAmelCase__ , UpperCAmelCase__ : str = 10, 0.0
UpperCAmelCase__ : Tuple = self.dummy_model()
UpperCAmelCase__ : Optional[Any] = self.dummy_sample_deter
scheduler.set_timesteps(_lowerCamelCase )
for t in scheduler.timesteps:
UpperCAmelCase__ : Optional[Any] = model(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : str = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ).prev_sample
return sample
def _a (self ):
"""simple docstring"""
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCamelCase )
def _a (self ):
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_lowerCamelCase )
UpperCAmelCase__ : str = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)
        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)
        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))
        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
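# --- added illustration (not part of the test class above) ---
# The `_get_variance` asserts follow the DDIM posterior variance
# sigma_t^2 = (1 - alpha_bar_prev) / (1 - alpha_bar_t) * (1 - alpha_bar_t / alpha_bar_prev).
# A stand-alone sketch, assuming the scheduler's default linear beta schedule
# (beta_start=0.0001, beta_end=0.02, 1000 training timesteps):
def _ddim_variance_sketch(t: int, prev_t: int):
    import torch

    betas = torch.linspace(0.0001, 0.02, 1000)
    alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
    alpha_t = alphas_cumprod[t]
    alpha_prev = alphas_cumprod[prev_t] if prev_t >= 0 else torch.tensor(1.0)
    return (1 - alpha_prev) / (1 - alpha_t) * (1 - alpha_t / alpha_prev)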
| 166 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/spiece.model""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/fnet-base""": 5_12,
"""google/fnet-large""": 5_12,
}
_A = """▁"""
class FNetTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" FNet tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
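# --- added illustration (pure arithmetic, mirrors the two methods above) ---
# For a pair of sequences the layout is [CLS] A [SEP] B [SEP], with segment id
# 0 for "[CLS] A [SEP]" and 1 for "B [SEP]". With placeholder ids:
# cls, sep = [3], [4]; seq_a, seq_b = [10, 11], [20, 21]
# cls + seq_a + sep + seq_b + sep                        -> [3, 10, 11, 4, 20, 21, 4]
# len(cls + seq_a + sep) * [0] + len(seq_b + sep) * [1]  -> [0, 0, 0, 0, 1, 1, 1]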
| 166 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''],
'''processing_layoutlmv2''': ['''LayoutLMv2Processor'''],
'''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv2"] = [
'''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv2ForQuestionAnswering''',
'''LayoutLMv2ForSequenceClassification''',
'''LayoutLMv2ForTokenClassification''',
'''LayoutLMv2Layer''',
'''LayoutLMv2Model''',
'''LayoutLMv2PreTrainedModel''',
]
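# --- added note: a minimal sketch of the lazy-import idea behind `_LazyModule`
# (the real implementation lives in `...utils`; the names below are hypothetical) ---
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure  # submodule name -> exported names

    def __getattr__(self, attr):
        # import the submodule only when one of its exports is first accessed
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")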
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 97 |
"""simple docstring"""
def UpperCamelCase ( _lowerCAmelCase : int ) -> int:
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
_UpperCAmelCase : Optional[Any] = 1
_UpperCAmelCase : List[str] = 1
while repunit:
_UpperCAmelCase : Tuple = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def UpperCamelCase ( _lowerCAmelCase : int = 1000000 ) -> int:
_UpperCAmelCase : Any = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(_lowerCAmelCase ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(F'''{solution() = }''')
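# Added sanity check: R(6) = 111111 = 3 * 7 * 11 * 13 * 37, so A(7) = 6 — the
# loop above walks the repunits mod 7 as 1, 4, 6, 5, 2, 0 and returns index 6.
assert least_divisible_repunit(7) == 6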
| 246 | 0 |
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost


class AStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 357 |
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
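    # Added note (not part of the original test): with the toy merges above
    # ("l o", "lo w</w>", "e r</w>"), the word "lower" is segmented as
    # l o w e r</w> -> lo w e r</w> -> lo w er</w>, i.e. ["lo", "w", "er</w>"],
    # which is exactly the prefix of the token sequence asserted here.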
    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)
                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)
                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark):w
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
    def test_log_warning(self):
        # We need to check that an error is raised when the user tries to load
        # a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")
        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."))
    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()
    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
| 31 | 0 |
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def lowercase_ ( __UpperCAmelCase ) -> Union[str, Any]:
for param in module.parameters():
lowerCAmelCase__ : int = False
def lowercase_ ( ) -> Any:
lowerCAmelCase__ : Tuple = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
lowerCAmelCase__ : str = '''mps'''
if device == "mps":
print(
"""WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch"""
""" errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues"""
""" with generations.""" )
return device
def lowercase_ ( __UpperCAmelCase ) -> Any:
lowerCAmelCase__ : Dict = plt.imshow(_snake_case )
fig.axes.get_xaxis().set_visible(_snake_case )
fig.axes.get_yaxis().set_visible(_snake_case )
plt.show()
def lowercase_ ( ) -> List[Any]:
lowerCAmelCase__ : Optional[Any] = datetime.now()
lowerCAmelCase__ : str = current_time.strftime("""%H:%M:%S""" )
return timestamp
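# --- added usage sketch (illustrative, kept commented to avoid import-time work) ---
# import torch.nn as nn
# layer = nn.Linear(4, 4)
# freeze_params(layer)  # gradients no longer flow through this layer
# assert all(not p.requires_grad for p in layer.parameters())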
| 242 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self):
        return 1e-4

    @property
    def default_onnx_opset(self):
        return 12
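# --- added usage sketch (illustrative; default values taken from the signature above) ---
# config = YolosConfig()
# assert config.num_detection_tokens == 100 and config.image_size == [512, 864]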
| 102 | 0 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)
    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
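# Added sanity check (illustrative, not in the original script): encoder keys
# have their `norm1` sublayer renamed to the HF `self_attn_layer_norm` name.
assert rename_state_dict_key("encoder.norm1.weight") == "encoder.self_attn_layer_norm.weight"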
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowercase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
lowercase__ : str = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
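# Added usage note (paths are placeholders, mirroring the argparse defaults above):
#   python <this_script>.py --src_path blenderbot-model.bin \
#       --save_dir hf_blenderbot --hf_config_json blenderbot-3b-config.json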
| 180 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/table-transformer-detection": (
"https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 180 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 39 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"SCUT-DLVCLab/lilt-roberta-en-base": (
"https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
),
}
class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
| 125 | 0 |
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")


@total_ordering
@dataclass
class Version:
    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def __str__(self):
        return self.version_str
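# --- added usage note (illustrative) ---
# Plain strings are coerced through `_validate_operand`, so comparisons work
# directly against version strings:
#   Version("1.0.0") == "1.0.0"  -> True   (tuple equality on (major, minor, patch))
#   Version("1.0.0") < "2.0.0"   -> True   (lexicographic tuple comparison)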
def _str_to_version_tuple(version_str):
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    return ".".join(str(v) for v in version_tuple)
| 33 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_perceiver'] = ['PerceiverFeatureExtractor']
    _import_structure['image_processing_perceiver'] = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_perceiver'] = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 33 | 1 |
"""simple docstring"""
_lowerCAmelCase :Tuple = [0, 2, 4, 6, 8]
_lowerCAmelCase :str = [1, 3, 5, 7, 9]
def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : list[int] , UpperCamelCase__ : int ):
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1 , -1 , -1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 10
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
_UpperCAmelCase : Optional[int] = 0
for digit in range(10 ):
_UpperCAmelCase : Tuple = digit
result += reversible_numbers(
0 , (remainder + 2 * digit) // 10 , UpperCamelCase__ , UpperCamelCase__ )
return result
_UpperCAmelCase : Optional[Any] = 0
for digita in range(10 ):
_UpperCAmelCase : Optional[Any] = digita
if (remainder + digita) % 2 == 0:
_UpperCAmelCase : List[Any] = ODD_DIGITS
else:
_UpperCAmelCase : List[str] = EVEN_DIGITS
for digita in other_parity_digits:
_UpperCAmelCase : Optional[Any] = digita
result += reversible_numbers(
remaining_length - 2 , (remainder + digita + digita) // 10 , UpperCamelCase__ , UpperCamelCase__ , )
return result
def lowerCamelCase_ (UpperCamelCase__ : int = 9 ):
_UpperCAmelCase : Union[str, Any] = 0
for length in range(1 , max_power + 1 ):
result += reversible_numbers(UpperCamelCase__ , 0 , [0] * length , UpperCamelCase__ )
return result
if __name__ == "__main__":
print(f"{solution() = }")
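# Added note: Project Euler 145 states there are 120 reversible numbers below
# one thousand (e.g. 36 + 63 = 99, all digits odd), which corresponds here to
#   solution(3) == 120    (lengths 1 through 3; no single-digit number works,
#                          since n + reverse(n) = 2n is always even)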
| 263 |
"""simple docstring"""
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
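# Added worked example: for the scores above (height = log2(8) = 3) the max
# player gets max(min(max(90, 23), max(6, 33)), min(max(21, 65), max(123, 34423)))
# = max(min(90, 33), min(65, 34423)) = max(33, 65) = 65.
assert minimax(0, 0, True, [90, 23, 6, 33, 21, 65, 123, 34423], 3) == 65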
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 263 | 1 |
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v(state_dict, config):
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    # rename keys
    state_dict = rename_keys(state_dict)
    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")
        expected_shape = torch.Size([1, 480, 640])
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")
    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''',
default=None,
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
)
parser.add_argument(
'''--model_name''',
default='''glpn-kitti''',
type=str,
help='''Name of the model in case you\'re pushing to the hub.''',
)
__snake_case = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 78 |
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

    def tearDown(self):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
    @require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
            ],
        )
        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ],
        )
        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )
        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-05,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )
        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
            ],
        )
        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )
    @require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")
        # convert model to fp16
        pipe.model.half()
        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)
    @slow
    @require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)

    @slow
    @require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)
    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
            ],
        )
        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ],
        )
        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    @require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(ValueError):
            fill_masker("This is")
        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : List[str] =tokenizer.get_vocab()
UpperCAmelCase : List[str] =sorted(vocab.keys() )[:2]
# Pipeline argument
UpperCAmelCase : Any =FillMaskPipeline(model=snake_case__ , tokenizer=snake_case__ , targets=snake_case__ )
UpperCAmelCase : Tuple =fill_masker(f'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
snake_case__ , [
{'''sequence''': ANY(snake_case__ ), '''score''': ANY(snake_case__ ), '''token''': ANY(snake_case__ ), '''token_str''': ANY(snake_case__ )},
{'''sequence''': ANY(snake_case__ ), '''score''': ANY(snake_case__ ), '''token''': ANY(snake_case__ ), '''token_str''': ANY(snake_case__ )},
] , )
UpperCAmelCase : int ={vocab[el] for el in targets}
self.assertEqual({el['''token'''] for el in outputs} , snake_case__ )
UpperCAmelCase : Optional[Any] =[tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['''token_str'''] for el in outputs} , set(snake_case__ ) )
# Call argument
UpperCAmelCase : Union[str, Any] =FillMaskPipeline(model=snake_case__ , tokenizer=snake_case__ )
UpperCAmelCase : Optional[int] =fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=snake_case__ )
self.assertEqual(
snake_case__ , [
{'''sequence''': ANY(snake_case__ ), '''score''': ANY(snake_case__ ), '''token''': ANY(snake_case__ ), '''token_str''': ANY(snake_case__ )},
{'''sequence''': ANY(snake_case__ ), '''score''': ANY(snake_case__ ), '''token''': ANY(snake_case__ ), '''token_str''': ANY(snake_case__ )},
] , )
UpperCAmelCase : Any ={vocab[el] for el in targets}
self.assertEqual({el['''token'''] for el in outputs} , snake_case__ )
UpperCAmelCase : Optional[int] =[tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['''token_str'''] for el in outputs} , set(snake_case__ ) )
# Score equivalence
UpperCAmelCase : Any =fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=snake_case__ )
UpperCAmelCase : Union[str, Any] =[top_mask['''token_str'''] for top_mask in outputs]
UpperCAmelCase : Optional[int] =[top_mask['''score'''] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(snake_case__ ) == set(snake_case__ ):
UpperCAmelCase : List[Any] =fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=snake_case__ )
UpperCAmelCase : List[str] =[top_mask['''score'''] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(snake_case__ ) , nested_simplify(snake_case__ ) )
# Raises with invalid
with self.assertRaises(snake_case__ ):
UpperCAmelCase : int =fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(snake_case__ ):
UpperCAmelCase : int =fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=[''''''] )
with self.assertRaises(snake_case__ ):
UpperCAmelCase : Any =fill_masker(f'''This is a {tokenizer.mask_token}''' , targets='''''' )
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : List[str] =FillMaskPipeline(model=snake_case__ , tokenizer=snake_case__ , top_k=2 )
UpperCAmelCase : Optional[Any] =fill_masker(f'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
snake_case__ , [
{'''sequence''': ANY(snake_case__ ), '''score''': ANY(snake_case__ ), '''token''': ANY(snake_case__ ), '''token_str''': ANY(snake_case__ )},
{'''sequence''': ANY(snake_case__ ), '''score''': ANY(snake_case__ ), '''token''': ANY(snake_case__ ), '''token_str''': ANY(snake_case__ )},
] , )
UpperCAmelCase : Any =FillMaskPipeline(model=snake_case__ , tokenizer=snake_case__ )
UpperCAmelCase : List[Any] =fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
snake_case__ , [
{'''sequence''': ANY(snake_case__ ), '''score''': ANY(snake_case__ ), '''token''': ANY(snake_case__ ), '''token_str''': ANY(snake_case__ )},
{'''sequence''': ANY(snake_case__ ), '''score''': ANY(snake_case__ ), '''token''': ANY(snake_case__ ), '''token_str''': ANY(snake_case__ )},
] , )
self.assertEqual(nested_simplify(snake_case__ ) , nested_simplify(snake_case__ ) )
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : str =tokenizer.get_vocab()
UpperCAmelCase : int =FillMaskPipeline(model=snake_case__ , tokenizer=snake_case__ )
# top_k=2, ntargets=3
UpperCAmelCase : Optional[Any] =sorted(vocab.keys() )[:3]
UpperCAmelCase : str =fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=snake_case__ )
# If we use the most probably targets, and filter differently, we should still
# have the same results
UpperCAmelCase : Tuple =[el['''token_str'''] for el in sorted(snake_case__ , key=lambda x : x["score"] , reverse=True )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(snake_case__ ).issubset(snake_case__ ):
UpperCAmelCase : str =fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=snake_case__ )
# They should yield exactly the same result
self.assertEqual(nested_simplify(snake_case__ ) , nested_simplify(snake_case__ ) )
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ ) -> Dict:
'''simple docstring'''
UpperCAmelCase : str =FillMaskPipeline(model=snake_case__ , tokenizer=snake_case__ )
UpperCAmelCase : Union[str, Any] =tokenizer.get_vocab()
# String duplicates + id duplicates
UpperCAmelCase : List[Any] =sorted(vocab.keys() )[:3]
UpperCAmelCase : Optional[int] =[targets[0], targets[1], targets[0], targets[2], targets[1]]
UpperCAmelCase : str =fill_masker(f'''My name is {tokenizer.mask_token}''' , targets=snake_case__ , top_k=10 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(snake_case__ ) , 3 )
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Any =FillMaskPipeline(model=snake_case__ , tokenizer=snake_case__ )
UpperCAmelCase : Union[str, Any] =fill_masker(
f'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
snake_case__ , [
[
{'''sequence''': ANY(snake_case__ ), '''score''': ANY(snake_case__ ), '''token''': ANY(snake_case__ ), '''token_str''': ANY(snake_case__ )},
{'''sequence''': ANY(snake_case__ ), '''score''': ANY(snake_case__ ), '''token''': ANY(snake_case__ ), '''token_str''': ANY(snake_case__ )},
],
[
{'''sequence''': ANY(snake_case__ ), '''score''': ANY(snake_case__ ), '''token''': ANY(snake_case__ ), '''token_str''': ANY(snake_case__ )},
{'''sequence''': ANY(snake_case__ ), '''score''': ANY(snake_case__ ), '''token''': ANY(snake_case__ ), '''token_str''': ANY(snake_case__ )},
],
[
{'''sequence''': ANY(snake_case__ ), '''score''': ANY(snake_case__ ), '''token''': ANY(snake_case__ ), '''token_str''': ANY(snake_case__ )},
{'''sequence''': ANY(snake_case__ ), '''score''': ANY(snake_case__ ), '''token''': ANY(snake_case__ ), '''token_str''': ANY(snake_case__ )},
],
] , )
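    # Illustrative pipeline call exercised by the tests above (a sketch; it reuses
    # the tiny checkpoint referenced earlier in this file, and the exact scores in
    # the returned dicts depend on the model):
    # unmasker = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''')
    # unmasker(f'''This is a {unmasker.tokenizer.mask_token}''' , top_k=2)
    # -> [{'''sequence''': ..., '''score''': ..., '''token''': ..., '''token_str''': ...}, ...]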
| 78 | 1 |
'''simple docstring'''
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 198 |
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
    TaTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/tokenizer.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/tokenizer.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/tokenizer.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/tokenizer.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/tokenizer.json""",
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""t5-small""": 5_12,
"""t5-base""": 5_12,
"""t5-large""": 5_12,
"""t5-3b""": 5_12,
"""t5-11b""": 5_12,
}
class TaTokenizerFast ( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = TaTokenizer
    prefix_tokens = []
    def __init__( self , vocab_file=None , tokenizer_file=None , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , extra_ids=100 , additional_special_tokens=None , **kwargs , ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [F"""<extra_id_{i}>""" for i in range(extra_ids )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x : bool('''extra_id_''' in str(x ) ) , additional_special_tokens ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
                    ''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
                    ''' tokens''' )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , extra_ids=extra_ids , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids
    @staticmethod
    def _eventually_correct_t5_max_length( pretrained_model_name_or_path , max_model_length , init_max_model_length ) -> int:
        if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    '''This tokenizer was incorrectly instantiated with a model max length of'''
                    F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
                    ''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
                    ''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
                    F""" {pretrained_model_name_or_path} automatically truncating your input to"""
                    F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
                    F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
                    ''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
                    ''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , FutureWarning , )
        return max_model_length
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
            logger.info(F"""Copy vocab file to {out_vocab_file}""" )
        return (out_vocab_file,)
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b = None ) -> List[int]:
        token_ids_a = token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a
        else:
            token_ids_b = token_ids_b + [self.eos_token_id]
            return self.prefix_tokens + token_ids_a + token_ids_b
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_b is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_b + eos ) * [0]
    def get_sentinel_tokens( self ) -> List[str]:
        return list(
            set(filter(lambda token : bool(re.search(r'''<extra_id_\d+>''' , token ) ) is not None , self.additional_special_tokens ) ) )
    def get_sentinel_token_ids( self ) -> List[int]:
        return [self.convert_tokens_to_ids(token ) for token in self.get_sentinel_tokens()]
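    # Illustrative behavior sketch (the ids 5, 6, 7 are placeholders, not real
    # vocabulary ids):
    # build_inputs_with_special_tokens([5, 6]) -> [5, 6, eos_token_id]  (T5 appends EOS, no BOS)
    # create_token_type_ids_from_sequences([5, 6], [7]) -> [0, 0, 0, 0, 0]  (T5 uses no token types)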
| 198 | 1 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
snake_case_ = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class A_ ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = XLNetTokenizer
__UpperCamelCase = XLNetTokenizerFast
__UpperCamelCase = True
__UpperCamelCase = True
def UpperCAmelCase__ ( self :List[str] ) -> Dict:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase = XLNetTokenizer(lowercase_ , keep_accents=lowercase_ )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self :Any ) -> Dict:
UpperCAmelCase = '<s>'
UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ) , lowercase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ) , lowercase_ )
def UpperCAmelCase__ ( self :Any ) -> Tuple:
UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , '<eod>' )
self.assertEqual(len(lowercase_ ) , 10_06 )
def UpperCAmelCase__ ( self :Tuple ) -> List[str]:
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def UpperCAmelCase__ ( self :Any ) -> Tuple:
UpperCAmelCase = XLNetTokenizer(lowercase_ , keep_accents=lowercase_ )
UpperCAmelCase = tokenizer.tokenize('This is a test' )
self.assertListEqual(lowercase_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ) , [2_85, 46, 10, 1_70, 3_82] )
UpperCAmelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
UpperCAmelCase = tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(lowercase_ , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
UpperCAmelCase = tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def UpperCAmelCase__ ( self :Tuple ) -> List[str]:
UpperCAmelCase = XLNetTokenizer(lowercase_ , do_lower_case=lowercase_ )
UpperCAmelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + '',
'i',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
'se',
'.',
] , )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['▁he', 'll', 'o'] )
def UpperCAmelCase__ ( self :Union[str, Any] ) -> List[Any]:
UpperCAmelCase = XLNetTokenizer(lowercase_ , do_lower_case=lowercase_ )
UpperCAmelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
'se',
'.',
] , )
@slow
def UpperCAmelCase__ ( self :Any ) -> int:
UpperCAmelCase = XLNetTokenizer.from_pretrained('xlnet-base-cased' )
UpperCAmelCase = tokenizer.encode('sequence builders' , add_special_tokens=lowercase_ )
UpperCAmelCase = tokenizer.encode('multi-sequence build' , add_special_tokens=lowercase_ )
UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(lowercase_ )
UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(lowercase_ , lowercase_ )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def UpperCAmelCase__ ( self :Tuple ) -> Optional[Any]:
# fmt: off
UpperCAmelCase = {'input_ids': [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name='xlnet-base-cased' , revision='c841166438c31ec7ca9a106dee7bb312b73ae511' , )
| 181 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
"""tokenization_cpmant""": ["""CpmAntTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_cpmant"""] = [
"""CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CpmAntForCausalLM""",
"""CpmAntModel""",
"""CpmAntPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 181 | 1 |
"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ : Tuple = get_tests_dir("""fixtures/test_sentencepiece.model""")
UpperCAmelCase_ : List[str] = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")
UpperCAmelCase_ : Optional[int] = """pt""" if is_torch_available() else """tf"""
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = CamembertTokenizer
__UpperCamelCase = CamembertTokenizerFast
__UpperCamelCase = True
__UpperCamelCase = True
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE_ : Optional[int] = CamembertTokenizer(lowercase_)
tokenizer.save_pretrained(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = '''<pad>'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_) , lowercase_)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_) , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''<s>NOTUSED''')
self.assertEqual(vocab_keys[1] , '''<pad>''')
self.assertEqual(vocab_keys[-1] , '''<mask>''')
self.assertEqual(len(lowercase_) , 1004)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1005)
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = CamembertTokenizer(lowercase_)
tokenizer.save_pretrained(self.tmpdirname)
SCREAMING_SNAKE_CASE_ : Tuple = CamembertTokenizerFast.from_pretrained(self.tmpdirname)
SCREAMING_SNAKE_CASE_ : Tuple = '''I was born in 92000, and this is falsé.'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer.encode(lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = rust_tokenizer.encode(lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : Any = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(lowercase_)
SCREAMING_SNAKE_CASE_ : Any = rust_tokenizer.tokenize(lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE_ : str = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ : Optional[int] = '''I was born in 92000, and this is falsé.'''
SCREAMING_SNAKE_CASE_ : Any = tokenizer.tokenize(lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = rust_tokenizer.tokenize(lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : Any = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer.encode(lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = rust_tokenizer.encode(lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
@slow
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = {'''input_ids''': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
SCREAMING_SNAKE_CASE_ : Tuple = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name='''camembert-base''' , revision='''3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf''' , sequences=lowercase_ , )
| 91 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_blip"""] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_blip"""] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 302 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser( subparsers=None ):
    """simple docstring"""
    if subparsers is not None:
        parser = subparsers.add_parser('''test''' )
    else:
        parser = argparse.ArgumentParser('''Accelerate test command''' )
    parser.add_argument(
        '''--config_file''' , default=None , help=(
            '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
            '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
            '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
            '''with \'huggingface\'.'''
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=test_command )
    return parser
def test_command( args ):
    """simple docstring"""
    script_name = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['''test_utils''', '''scripts''', '''test_script.py'''] )
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = F"""--config_file={args.config_file} {script_name}"""
    cmd = ['''accelerate-launch'''] + test_args.split()
    result = execute_subprocess_async(cmd , env=os.environ.copy() )
    if result.returncode == 0:
        print('''Test is a success! You are ready for your distributed training!''' )
def main():
    """simple docstring"""
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args )
if __name__ == "__main__":
main()
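# Illustrative CLI usage sketch (the config path below is an example, not a file
# shipped with this snippet):
#   accelerate test --config_file ~/.cache/huggingface/accelerate/default_config.yaml
# which resolves to running test_script.py through `accelerate-launch`, as above.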
| 19 |
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''mgp-str''': '''https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json''',
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''mgp-str''': 27}
class a_ ( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , vocab_file , unk_token="[GO]" , bos_token="[GO]" , eos_token="[s]" , pad_token="[GO]" , **kwargs ) ->None:
        super().__init__(
            unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , **kwargs , )
        with open(vocab_file , encoding='''utf-8''' ) as vocab_handle:
            self.vocab = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.vocab.items()}
    @property
    def vocab_size( self ) ->int:
        return len(self.vocab )
    def get_vocab( self ) ->dict:
        return dict(self.vocab , **self.added_tokens_encoder )
    def _tokenize( self , text ) ->list:
        char_tokens = []
        for s in text:
            char_tokens.extend(s )
        return char_tokens
    def _convert_token_to_id( self , token ) ->int:
        return self.vocab.get(token , self.vocab.get(self.unk_token ) )
    def _convert_id_to_token( self , index ) ->str:
        return self.decoder.get(index )
    def save_vocabulary( self , save_directory , filename_prefix = None ) ->Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error('''Vocabulary path ({}) should be a directory'''.format(save_directory ) )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.vocab , indent=2 , sort_keys=True , ensure_ascii=False ) + '''\n''' )
        return (vocab_file,)
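# Illustrative usage sketch ('vocab.json' is a hypothetical local file mapping
# single characters to ids):
# tokenizer = a_('''vocab.json''')
# tokenizer._tokenize('''abc''')  # -> ['a', 'b', 'c']: tokenization is character-level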
| 19 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 127 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE : str = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Tuple = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Any = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 127 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE : Dict = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_vit_msn"""] = [
"""VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMSNModel""",
"""ViTMSNForImageClassification""",
"""ViTMSNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 371 |
"""simple docstring"""
def lowercase ( number : int ) ->str:
    """simple docstring"""
    if number > 0:
        raise ValueError('''input must be a negative integer''' )
    binary_number_length = len(bin(number )[3:] )
    twos_complement_number = bin(abs(number ) - (1 << binary_number_length ) )[3:]
    twos_complement_number = (
        (
            '''1'''
            + '''0''' * (binary_number_length - len(twos_complement_number ))
            + twos_complement_number
        )
        if number < 0
        else '''0'''
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
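# Illustrative checks (a sketch of expected outputs, matching the function as
# reconstructed above):
# lowercase(-5)  # -> "0b1011"
# lowercase(-1)  # -> "0b11"
# lowercase(0)   # -> "0b0"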
| 24 | 0 |
'''simple docstring'''
from datetime import datetime
import requests
def download_video ( url ):
    '''simple docstring'''
    base_url = """https://downloadgram.net/wp-json/wppress/video-downloader/video?url="""
    video_url = requests.get(base_url + url ).json()[0]["""urls"""][0]["""src"""]
    return requests.get(video_url ).content
if __name__ == "__main__":
a__ : int = input('Enter Video/IGTV url: ').strip()
a__ : int = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(f'''Done. Video saved to disk as {file_name}.''')
| 349 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean ( input_a ,input_b ):
    '''simple docstring'''
    return math.sqrt(sum(pow(a - b ,2 ) for a, b in zip(input_a ,input_b ) ) )
def similarity_search ( dataset ,value_array ):
    '''simple docstring'''
    if dataset.ndim != value_array.ndim:
        msg = (
            """Wrong input data's dimensions... """
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg )
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                """Wrong input data's shape... """
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg )
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("""Wrong shape""" )
    if dataset.dtype != value_array.dtype:
        msg = (
            """Input data have different datatype... """
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg )
    answer = []
    for value in value_array:
        dist = euclidean(value ,dataset[0] )
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value ,dataset_value )
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist] )
    return answer
def cosine_similarity ( input_a ,input_b ):
    '''simple docstring'''
    return np.dot(input_a ,input_b ) / (norm(input_a ) * norm(input_b ))
if __name__ == "__main__":
import doctest
doctest.testmod()
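# Illustrative usage sketch (values chosen for this example only):
# dataset = np.array([[0], [1], [2]])
# value_array = np.array([[3]])
# similarity_search(dataset, value_array)  # -> [[[2], 1.0]]: nearest vector and its distance
# cosine_similarity(np.array([1, 2]), np.array([6, 32]))  # -> ~0.9615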
| 349 | 1 |
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class __UpperCAmelCase ( TestCase ):
    '''simple docstring'''
    def _create_example_records( self ):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]
    def _create_example_dict( self ):
        data = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
        return Dataset.from_dict(data )
    def test_create( self ):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records )
        self.assertListEqual(dset.column_names , ['''col_1''', '''col_2'''] )
        for i, r in enumerate(dset ):
            self.assertDictEqual(r , example_records[i] )
    def test_list_dict_equivalent( self ):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records )
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
        self.assertEqual(dset.info , dset_from_dict.info )
    def test_uneven_records( self ):  # checks what happens with missing columns
        example_records = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
        dset = Dataset.from_list(example_records )
        self.assertDictEqual(dset[0] , {'''col_1''': 1} )
        self.assertDictEqual(dset[1] , {'''col_1''': None} )  # NB: first record is used for columns
    def test_variable_list_records( self ):  # checks if the type can be inferred from the second record
        example_records = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
        dset = Dataset.from_list(example_records )
        self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64''' ) ) )
    def test_create_empty( self ):
        dset = Dataset.from_list([] )
        self.assertEqual(len(dset ) , 0 )
        self.assertListEqual(dset.column_names , [] )
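    # Behavior recap of the cases asserted above (a sketch, not an extra test):
    # Dataset.from_list([{'''col_1''': 1}, {'''col_2''': '''x'''}])[1]  # -> {'''col_1''': None};
    # the first record fixes the column set, and later records are padded with None.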
| 366 |
'''simple docstring'''
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class __UpperCAmelCase :
'''simple docstring'''
pass
| 18 | 0 |
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__( self ) -> None:
        """simple docstring"""
        self.img = ''
        self.original_image = ''
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0
    def stretch( self , input_image ) -> None:
        """simple docstring"""
        self.img = cv2.imread(input_image , 0 )
        self.original_image = copy.deepcopy(self.img )
        x , _ , _ = plt.hist(self.img.ravel() , 256 , [0, 256] , label='x' )
        self.k = np.sum(x )
        for i in range(len(x ) ):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last )
            self.rem = int(last + 1 if self.rem >= 0.5 else last )
            self.last_list.append(self.rem )
            self.number_of_rows = int(np.ma.count(self.img ) / self.img[1].size )
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols ):
            for j in range(self.number_of_rows ):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite('output_data/output.jpg' , self.img )
    def plot_histogram( self ) -> None:
        """simple docstring"""
        plt.hist(self.img.ravel() , 256 , [0, 256] )
    def show_image( self ) -> None:
        """simple docstring"""
        cv2.imshow('Output-Image' , self.img )
        cv2.imshow('Input-Image' , self.original_image )
        cv2.waitKey(5000 )
        cv2.destroyAllWindows()
if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), 'image_data/input.jpg')
    stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
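# Note on the mapping built above (assuming the standard histogram-equalization
# scheme): prk is the probability of gray level i, sk its cumulative sum, and
# (L - 1) * sk the remapped intensity, so last_list acts as the equalization
# lookup table applied pixel by pixel in the final double loop.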
| 199 |
from sklearn.metrics import mean_squared_error
import datasets
lowerCamelCase = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
lowerCamelCase = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n'
lowerCamelCase = '\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n "raw_values" : Returns a full set of errors in case of multioutput input.\n\n "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric("mse")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric("mse", "multilist")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. ])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
    def _info( self ) -> datasets.MetricInfo:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'
] , )
    def _get_feature_types( self ) -> dict:
"""simple docstring"""
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('float' ) ),
"references": datasets.Sequence(datasets.Value('float' ) ),
}
else:
return {
"predictions": datasets.Value('float' ),
"references": datasets.Value('float' ),
}
    def _compute( self , predictions , references , sample_weight=None , multioutput='uniform_average' , squared=True ) -> dict:
        """simple docstring"""
        mse = mean_squared_error(
            references , predictions , sample_weight=sample_weight , multioutput=multioutput , squared=squared )
        return {"mse": mse}
| 199 | 1 |
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple( x ):
    if isinstance(x , collections.abc.Iterable ):
        return x
    return (x, x)
@require_flax
class A__ :
"""simple docstring"""
    def get_vision_text_model( self , config , text_config ):
        '''simple docstring'''
        pass
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pass
    def get_pretrained_model_and_inputs( self ):
        '''simple docstring'''
        pass
    def assert_almost_equals( self , a , b , tol ):
        '''simple docstring'''
        diff = np.abs((a - b)).max()
        self.assertLessEqual(diff , tol , F'Difference between torch and flax is {diff} (>= {tol}).')
    def check_model_from_pretrained_configs( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        '''simple docstring'''
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config)
        model = FlaxVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask)
        self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim))
    def check_vision_text_dual_encoder_from_pretrained( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        '''simple docstring'''
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config)
        kwargs = {'vision_model': vision_model, 'text_model': text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask)
        self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim))
    def check_save_load( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        '''simple docstring'''
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config)
        kwargs = {'vision_model': vision_model, 'text_model': text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask)
        out_1 = output[0]
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)
            after_output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff , 1e-3)
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase=None , **lowercase) -> Optional[Any]:
'''simple docstring'''
a__ : int = self.get_vision_text_model(lowercase , lowercase)
a__ : Optional[int] = {'vision_model': vision_model, 'text_model': text_model}
a__ : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase)
a__ : Any = model(
input_ids=lowercase , pixel_values=lowercase , attention_mask=lowercase , output_attentions=lowercase)
a__ : Tuple = output.vision_model_output.attentions
self.assertEqual(len(lowercase) , vision_config.num_hidden_layers)
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
a__ : Optional[int] = to_atuple(vision_model.config.image_size)
a__ : Union[str, Any] = to_atuple(vision_model.config.patch_size)
a__ : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
a__ : Optional[Any] = num_patches + 1
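# e.g. with a standard ViT-Base setup (image_size=224, patch_size=16, values assumed
# for illustration): (224 // 16) * (224 // 16) + 1 = 14 * 14 + 1 = 197 tokens per image.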
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len))
a__ : Optional[int] = output.text_model_output.attentions
self.assertEqual(len(lowercase) , text_config.num_hidden_layers)
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __lowercase ( self , lowercase , lowercase , lowercase) -> Optional[Any]:
'''simple docstring'''
pt_model.to(lowercase)
pt_model.eval()
# prepare inputs
a__ : Optional[int] = inputs_dict
a__ : Any = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}
with torch.no_grad():
a__ : Union[str, Any] = pt_model(**lowercase).to_tuple()
a__ : Tuple = fx_model(**lowercase).to_tuple()
self.assertEqual(len(lowercase) , len(lowercase) , 'Output lengths differ between Flax and PyTorch')
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4]):
self.assert_almost_equals(lowercase , pt_output.numpy() , 4e-2)
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowercase)
a__ : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase , from_pt=lowercase)
a__ : Optional[int] = fx_model_loaded(**lowercase).to_tuple()
self.assertEqual(len(lowercase) , len(lowercase) , 'Output lengths differ between Flax and PyTorch')
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4]):
self.assert_almost_equals(lowercase , pt_output.numpy() , 4e-2)
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowercase)
a__ : Optional[Any] = VisionTextDualEncoderModel.from_pretrained(lowercase , from_flax=lowercase)
pt_model_loaded.to(lowercase)
pt_model_loaded.eval()
with torch.no_grad():
a__ : Dict = pt_model_loaded(**lowercase).to_tuple()
self.assertEqual(len(lowercase) , len(lowercase) , 'Output lengths differ between Flax and PyTorch')
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4]):
self.assert_almost_equals(lowercase , pt_output_loaded.numpy() , 4e-2)
def __lowercase ( self , lowercase , lowercase , lowercase) -> Optional[int]:
'''simple docstring'''
a__ : Optional[Any] = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase , lowercase)
a__ : Union[str, Any] = VisionTextDualEncoderModel(lowercase)
a__ : Any = FlaxVisionTextDualEncoderModel(lowercase)
a__ : Optional[Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowercase)
a__ : Any = fx_state
self.check_pt_flax_equivalence(lowercase , lowercase , lowercase)
def __lowercase ( self , lowercase , lowercase , lowercase) -> List[str]:
'''simple docstring'''
a__ : Any = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase , lowercase)
a__ : Optional[int] = VisionTextDualEncoderModel(lowercase)
a__ : Optional[int] = FlaxVisionTextDualEncoderModel(lowercase)
a__ : Dict = load_flax_weights_in_pytorch_model(lowercase , fx_model.params)
self.check_pt_flax_equivalence(lowercase , lowercase , lowercase)
def __lowercase ( self) -> Optional[int]:
'''simple docstring'''
a__ : int = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowercase)
def __lowercase ( self) -> Dict:
'''simple docstring'''
a__ : Any = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowercase)
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
a__ : Union[str, Any] = self.prepare_config_and_inputs()
self.check_save_load(**lowercase)
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
a__ : Tuple = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowercase)
@is_pt_flax_cross_test
def __lowercase ( self) -> Tuple:
'''simple docstring'''
a__ : Optional[Any] = self.prepare_config_and_inputs()
a__ : Optional[Any] = config_inputs_dict.pop('vision_config')
a__ : Optional[Any] = config_inputs_dict.pop('text_config')
a__ : Optional[int] = config_inputs_dict
self.check_equivalence_pt_to_flax(lowercase , lowercase , lowercase)
self.check_equivalence_flax_to_pt(lowercase , lowercase , lowercase)
@slow
def __lowercase ( self) -> List[str]:
'''simple docstring'''
a__ : str = self.get_pretrained_model_and_inputs()
a__ : str = model_a(**lowercase)
a__ : Optional[Any] = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(lowercase)
a__ : List[str] = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase)
a__ : Optional[Any] = model_a(**lowercase)
a__ : Tuple = after_outputs[0]
a__ : List[str] = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(lowercase , 1e-5)
@require_flax
class A__ ( __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self) -> Any:
'''simple docstring'''
a__ : int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-bert' , vision_from_pt=lowercase , text_from_pt=lowercase , )
a__ : Optional[Any] = 13
a__ : List[str] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
])
a__ : List[str] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size)
a__ : List[Any] = random_attention_mask([batch_size, 4])
a__ : Optional[int] = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def __lowercase ( self , lowercase , lowercase) -> Any:
'''simple docstring'''
a__ : int = FlaxViTModel(lowercase)
a__ : Optional[int] = FlaxBertModel(lowercase)
return vision_model, text_model
def __lowercase ( self) -> Any:
'''simple docstring'''
a__ : Dict = FlaxViTModelTester(self)
a__ : Dict = FlaxBertModelTester(self)
a__ : int = vit_model_tester.prepare_config_and_inputs()
a__ : List[Any] = bert_model_tester.prepare_config_and_inputs()
a__ : Tuple = vision_config_and_inputs
a__ : str = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class A__ ( __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self) -> Tuple:
'''simple docstring'''
a__ : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-clip' , 'hf-internal-testing/tiny-bert' , vision_from_pt=lowercase , text_from_pt=lowercase , )
a__ : Dict = 13
a__ : Any = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
])
a__ : Any = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size)
a__ : str = random_attention_mask([batch_size, 4])
a__ : int = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def __lowercase ( self , lowercase , lowercase) -> int:
'''simple docstring'''
a__ : Optional[int] = FlaxCLIPVisionModel(lowercase)
a__ : Dict = FlaxBertModel(lowercase)
return vision_model, text_model
def __lowercase ( self) -> List[str]:
'''simple docstring'''
a__ : Tuple = FlaxCLIPVisionModelTester(self)
a__ : int = FlaxBertModelTester(self)
a__ : int = clip_model_tester.prepare_config_and_inputs()
a__ : int = bert_model_tester.prepare_config_and_inputs()
a__ : Any = vision_config_and_inputs
a__ : Tuple = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class A__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
a__ : List[str] = FlaxVisionTextDualEncoderModel.from_pretrained('clip-italian/clip-italian' , logit_scale_init_value=1.0)
a__ : Tuple = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian')
a__ : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
a__ : List[str] = processor(
text=['una foto di un gatto', 'una foto di un cane'] , images=lowercase , padding=lowercase , return_tensors='np')
a__ : Any = model(**lowercase)
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
a__ : int = np.array([[1.2_28_47_27, 0.3_10_41_22]])
self.assertTrue(np.allclose(outputs.logits_per_image , lowercase , atol=1e-3))
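# Minimal usage sketch (checkpoint names assumed, mirroring the tests above rather than an
# official recipe):
#   model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
#       "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-bert"
#   )
#   outputs = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
# after which outputs.logits_per_image holds the (n_images, n_texts) similarity scores.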
| 365 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
lowercase : List[Any] = logging.get_logger(__name__)
lowercase : Any = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# See all MVP models at https://huggingface.co/models?filter=mvp
lowercase : Optional[Any] = {
"""vocab_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json""",
},
"""added_tokens.json""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json""",
},
"""merges_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json""",
},
}
lowercase : List[str] = {
"""RUCAIBox/mvp""": 1_0_2_4,
}
class A__ ( __UpperCAmelCase ):
"""simple docstring"""
__A : Dict = VOCAB_FILES_NAMES
__A : str = PRETRAINED_VOCAB_FILES_MAP
__A : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : Any = ['''input_ids''', '''attention_mask''']
__A : Tuple = MvpTokenizer
def __init__( self , lowercase=None , lowercase=None , lowercase=None , lowercase="replace" , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase=False , lowercase=True , **lowercase , ) -> str:
'''simple docstring'''
super().__init__(
lowercase , lowercase , tokenizer_file=lowercase , errors=lowercase , bos_token=lowercase , eos_token=lowercase , sep_token=lowercase , cls_token=lowercase , unk_token=lowercase , pad_token=lowercase , mask_token=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase , **lowercase , )
a__ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('add_prefix_space' , lowercase) != add_prefix_space:
a__ : Dict = getattr(lowercase , pre_tok_state.pop('type'))
a__ : str = add_prefix_space
a__ : Union[str, Any] = pre_tok_class(**lowercase)
a__ : List[Any] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
a__ : Optional[int] = 'post_processor'
a__ : Optional[int] = getattr(self.backend_tokenizer , lowercase , lowercase)
if tokenizer_component_instance:
a__ : str = json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
a__ : Any = tuple(state['sep'])
if "cls" in state:
a__ : str = tuple(state['cls'])
a__ : List[str] = False
if state.get('add_prefix_space' , lowercase) != add_prefix_space:
a__ : List[str] = add_prefix_space
a__ : List[str] = True
if state.get('trim_offsets' , lowercase) != trim_offsets:
a__ : Optional[int] = trim_offsets
a__ : List[str] = True
if changes_to_apply:
a__ : Optional[int] = getattr(lowercase , state.pop('type'))
a__ : Tuple = component_class(**lowercase)
setattr(self.backend_tokenizer , lowercase , lowercase)
@property
def __lowercase ( self) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.')
return None
return str(self._mask_token)
@mask_token.setter
def __lowercase ( self , lowercase) -> Any:
'''simple docstring'''
a__ : Tuple = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase) if isinstance(lowercase , lowercase) else value
a__ : str = value
def __lowercase ( self , *lowercase , **lowercase) -> BatchEncoding:
'''simple docstring'''
a__ : Optional[Any] = kwargs.get('is_split_into_words' , lowercase)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'to use it with pretokenized inputs.')
return super()._batch_encode_plus(*lowercase , **lowercase)
def __lowercase ( self , *lowercase , **lowercase) -> BatchEncoding:
'''simple docstring'''
a__ : List[str] = kwargs.get('is_split_into_words' , lowercase)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'to use it with pretokenized inputs.')
return super()._encode_plus(*lowercase , **lowercase)
def __lowercase ( self , lowercase , lowercase = None) -> Tuple[str]:
'''simple docstring'''
a__ : List[str] = self._tokenizer.model.save(lowercase , name=lowercase)
return tuple(lowercase)
def __lowercase ( self , lowercase , lowercase=None) -> Union[str, Any]:
'''simple docstring'''
a__ : List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __lowercase ( self , lowercase , lowercase = None) -> List[int]:
'''simple docstring'''
a__ : int = [self.sep_token_id]
a__ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
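# Special-token layout produced by the two methods above (BART-style):
#   single sequence:   <s> A </s>
#   pair of sequences: <s> A </s> </s> B </s>
# and the token-type-id method returns all zeros in both cases.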
| 225 | 0 |
'''simple docstring'''
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class _UpperCamelCase ( A ):
'''simple docstring'''
lowerCAmelCase__ = """EncodecFeatureExtractor"""
lowerCAmelCase__ = ("""T5Tokenizer""", """T5TokenizerFast""")
def __init__( self : str , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[str]):
'''simple docstring'''
super().__init__(_lowerCAmelCase , _lowerCAmelCase)
__lowercase =self.feature_extractor
__lowercase =False
def __lowerCamelCase ( self : Any , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : str=None , _lowerCAmelCase : Optional[Any]=True):
'''simple docstring'''
return self.tokenizer.get_decoder_prompt_ids(task=_lowerCAmelCase , language=_lowerCAmelCase , no_timestamps=_lowerCAmelCase)
def __call__( self : Dict , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : Dict):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*_lowerCAmelCase , **_lowerCAmelCase)
__lowercase =kwargs.pop('audio' , _lowerCAmelCase)
__lowercase =kwargs.pop('sampling_rate' , _lowerCAmelCase)
__lowercase =kwargs.pop('text' , _lowerCAmelCase)
if len(_lowerCAmelCase) > 0:
__lowercase =args[0]
__lowercase =args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.')
if text is not None:
__lowercase =self.tokenizer(_lowerCAmelCase , **_lowerCAmelCase)
if audio is not None:
__lowercase =self.feature_extractor(_lowerCAmelCase , *_lowerCAmelCase , sampling_rate=_lowerCAmelCase , **_lowerCAmelCase)
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
__lowercase =audio_inputs['input_values']
if "padding_mask" in audio_inputs:
__lowercase =audio_inputs['padding_mask']
return inputs
def __lowerCamelCase ( self : Optional[int] , *_lowerCAmelCase : Any , **_lowerCAmelCase : List[str]):
'''simple docstring'''
__lowercase =kwargs.pop('audio' , _lowerCAmelCase)
__lowercase =kwargs.pop('padding_mask' , _lowerCAmelCase)
if len(_lowerCAmelCase) > 0:
__lowercase =args[0]
__lowercase =args[1:]
if audio_values is not None:
return self._decode_audio(_lowerCAmelCase , padding_mask=_lowerCAmelCase)
else:
return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase)
def __lowerCamelCase ( self : str , *_lowerCAmelCase : Any , **_lowerCAmelCase : int):
'''simple docstring'''
return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase)
def __lowerCamelCase ( self : List[str] , _lowerCAmelCase : str , _lowerCAmelCase : Optional = None):
'''simple docstring'''
__lowercase =to_numpy(_lowerCAmelCase)
__lowercase , __lowercase , __lowercase =audio_values.shape
if padding_mask is None:
return list(_lowerCAmelCase)
__lowercase =to_numpy(_lowerCAmelCase)
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
__lowercase =seq_len - padding_mask.shape[-1]
__lowercase =1 - self.feature_extractor.padding_value
__lowercase =np.pad(_lowerCAmelCase , ((0, 0), (0, difference)) , 'constant' , constant_values=_lowerCAmelCase)
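# e.g. with padding_value == 0.0 the mask is extended with 1 - 0.0 == 1 ("keep"), so frames
# the model generated past the prompt survive the boolean slice below (illustrative values).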
__lowercase =audio_values.tolist()
for i in range(_lowerCAmelCase):
__lowercase =np.asarray(audio_values[i])[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
__lowercase =sliced_audio.reshape(_lowerCAmelCase , -1)
return audio_values
| 166 |
'''simple docstring'''
from typing import List
import numpy as np
def _A ( _lowerCAmelCase ):
"""simple docstring"""
lists_lengths ={key: len(value ) for key, value in gen_kwargs.items() if isinstance(value , list )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
'Sharding is ambiguous for this dataset: '
+ 'we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'
+ '\n'.join(f"""\t- key {key} has length {length}""" for key, length in lists_lengths.items() )
+ '\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '
+ 'and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'
) )
max_length =max(lists_lengths.values() , default=0 )
return max(1 , max_length )
def _distribute_shards ( num_shards , max_num_jobs ):
"""simple docstring"""
shards_indices_per_group =[]
for group_idx in range(max_num_jobs ):
num_shards_to_add =num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
start =shards_indices_per_group[-1].stop if shards_indices_per_group else 0
shard_indices =range(start , start + num_shards_to_add )
shards_indices_per_group.append(shard_indices )
return shards_indices_per_group
def _A ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
__lowercase =_number_of_shards_in_gen_kwargs(_lowerCAmelCase )
if num_shards == 1:
return [dict(_lowerCAmelCase )]
else:
__lowercase =_distribute_shards(num_shards=_lowerCAmelCase , max_num_jobs=_lowerCAmelCase )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(_lowerCAmelCase , _lowerCAmelCase )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(_lowerCAmelCase ) )
]
def _A ( _lowerCAmelCase ):
"""simple docstring"""
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , _lowerCAmelCase )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def _A ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
__lowercase ={len(_lowerCAmelCase ) for value in gen_kwargs.values() if isinstance(_lowerCAmelCase , _lowerCAmelCase )}
__lowercase ={}
for size in list_sizes:
__lowercase =list(range(_lowerCAmelCase ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
__lowercase =dict(_lowerCAmelCase )
for key, value in shuffled_kwargs.items():
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__lowercase =[value[i] for i in indices_per_size[len(_lowerCAmelCase )]]
return shuffled_kwargs
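# Worked example of the intended split (illustrative): five shards over at most two jobs give
#   _distribute_shards(num_shards=5, max_num_jobs=2) -> [range(0, 3), range(3, 5)]
# i.e. the first (num_shards % max_num_jobs) groups each receive one extra shard.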
| 166 | 1 |
from datetime import datetime
import requests
def _a ( SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
UpperCamelCase__ : str = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
UpperCamelCase__ : Dict = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src''']
return requests.get(SCREAMING_SNAKE_CASE ).content
if __name__ == "__main__":
__UpperCamelCase : Any = input("Enter Video/IGTV url: ").strip()
__UpperCamelCase : str = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(f"Done. Video saved to disk as {file_name}.")
| 51 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __magic_name__ ( __lowerCAmelCase , unittest.TestCase):
A: int = CTRLTokenizer
A: List[Any] = False
A: Dict = False
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCamelCase__ : Dict = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
UpperCamelCase__ : List[str] = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
UpperCamelCase__ : Tuple = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
UpperCamelCase__ : int = {'''unk_token''': '''<unk>'''}
UpperCamelCase__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCamelCase__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCamelCase__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowerCamelCase__ ) )
def UpperCAmelCase__ ( self : Tuple , **lowerCamelCase__ : str ) -> Dict:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def UpperCAmelCase__ ( self : List[Any] , lowerCamelCase__ : Any ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ : Tuple = '''adapt react readapt apt'''
UpperCamelCase__ : Optional[Any] = '''adapt react readapt apt'''
return input_text, output_text
def UpperCAmelCase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
UpperCamelCase__ : int = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCamelCase__ : Optional[Any] = '''adapt react readapt apt'''
UpperCamelCase__ : List[Any] = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
UpperCamelCase__ : Tuple = tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ : Dict = tokens + [tokenizer.unk_token]
UpperCamelCase__ : List[str] = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , lowerCamelCase__ )
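# Note: the "@@" suffix in the toy vocab above is the subword-nmt continuation marker;
# joining the tokens and stripping "@@ " rebuilds the original "adapt react readapt apt".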
| 51 | 1 |
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def A (__A : Optional[int] , __A : int , __A : str=None ) -> List[Any]:
"""simple docstring"""
assert torch_layer.weight.shape == weight.shape, F"""{torch_layer} layer.weight does not match"""
UpperCAmelCase_ = nn.Parameter(__A )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, F"""{torch_layer} layer.bias does not match"""
UpperCAmelCase_ = nn.Parameter(__A )
def A (__A : Tuple , __A : Dict , __A : str ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ = np.asarray(weights[0] )
UpperCAmelCase_ = np.asarray(weights[1] )
UpperCAmelCase_ = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(__A ).transpose(1 , 2 ).contiguous().view(-1 , __A ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(__A ).transpose(1 , 2 ).contiguous().view(-1 , __A ) , )
set_param(
torch_layer.output.dense , torch.tensor(__A ).view(-1 , __A ).contiguous().transpose(0 , 1 ) , )
def A (__A : Optional[Any] , __A : Any , __A : List[Any] ) -> int:
"""simple docstring"""
UpperCAmelCase_ = np.asarray(weights[0] )
UpperCAmelCase_ = np.asarray(weights[1] )
UpperCAmelCase_ = np.asarray(weights[2] )
UpperCAmelCase_ = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(__A ).transpose(1 , 2 ).contiguous().view(-1 , __A ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(__A ).transpose(1 , 2 ).contiguous().view(-1 , __A ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(__A ).transpose(1 , 2 ).contiguous().view(-1 , __A ) , )
set_param(
torch_layer.output.dense , torch.tensor(__A ).view(-1 , __A ).contiguous().transpose(0 , 1 ) , )
def A (__A : int , __A : Union[str, Any] , __A : List[str] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ = weights[0][0][0]
UpperCAmelCase_ = np.asarray(layer_norm_a[0] )
UpperCAmelCase_ = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(__A ) , torch.tensor(__A ) , )
# lsh weights + output
UpperCAmelCase_ = weights[0][1]
if len(__A ) < 4:
set_layer_weights_in_torch_lsh(__A , torch_block.attention , __A )
else:
set_layer_weights_in_torch_local(__A , torch_block.attention , __A )
# intermediate weighs
UpperCAmelCase_ = weights[2][0][1][2]
# Chunked Feed Forward
if len(__A ) == 4:
UpperCAmelCase_ = intermediate_weights[2]
# layernorm 2
UpperCAmelCase_ = np.asarray(intermediate_weights[0][0] )
UpperCAmelCase_ = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(__A ) , torch.tensor(__A ) , )
# intermediate dense
UpperCAmelCase_ = np.asarray(intermediate_weights[1][0] )
UpperCAmelCase_ = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(__A ).transpose(0 , 1 ).contiguous() , torch.tensor(__A ) , )
# intermediate out
UpperCAmelCase_ = np.asarray(intermediate_weights[4][0] )
UpperCAmelCase_ = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(__A ).transpose(0 , 1 ).contiguous() , torch.tensor(__A ) , )
def A (__A : Optional[int] , __A : Tuple , __A : Any ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ = torch_model.reformer
# word embeds
UpperCAmelCase_ = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(__A ) , )
if isinstance(weights[3] , __A ):
UpperCAmelCase_ = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
UpperCAmelCase_ = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), F"""{position_embeddings[emb_idx]} emb does not match"""
UpperCAmelCase_ = nn.Parameter(torch.tensor(__A ) )
UpperCAmelCase_ = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
__A ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
UpperCAmelCase_ = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(__A , __A , __A )
# output layer norm
UpperCAmelCase_ = np.asarray(weights[7][0] )
UpperCAmelCase_ = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(__A ) , torch.tensor(__A ) , )
# output embeddings
UpperCAmelCase_ = np.asarray(weights[9][0] )
UpperCAmelCase_ = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(__A ).transpose(0 , 1 ).contiguous() , torch.tensor(__A ) , )
def A (__A : Tuple , __A : int , __A : str ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ = ReformerConfig.from_json_file(__A )
print(F"""Building PyTorch model from configuration: {config}""" )
UpperCAmelCase_ = ReformerModelWithLMHead(__A )
with open(__A , '''rb''' ) as f:
UpperCAmelCase_ = pickle.load(__A )['''weights''']
set_model_weights_in_torch(__A , __A , config.hidden_size )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , __A )
if __name__ == "__main__":
snake_case_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained Reformer model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
snake_case_ : List[Any] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 51 |
'''simple docstring'''
def UpperCamelCase_ ( _UpperCAmelCase : list ) -> list:
"""simple docstring"""
_UpperCAmelCase : List[Any] = len(_UpperCAmelCase )
for _ in range(_UpperCAmelCase ):
for i in range(_ % 2 , arr_size - 1 , 2 ):
if arr[i + 1] < arr[i]:
arr[i], arr[i + 1] = arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[Any] = list(range(10, 0, -1))
print(F'Original: {arr}. Sorted: {odd_even_transposition(arr)}')
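# Quick sanity check (illustrative): n alternating odd/even passes fully sort any list.
assert odd_even_transposition([3, 1, 2]) == [1, 2, 3]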
| 31 | 0 |
from __future__ import annotations
def __lowerCamelCase ( snake_case__ ) -> None:
"""simple docstring"""
create_state_space_tree(snake_case__ ,[] ,0 ,[0 for i in range(len(snake_case__ ) )] )
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,) -> None:
"""simple docstring"""
if index == len(snake_case__ ):
print(snake_case__ )
return
for i in range(len(snake_case__ ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
index_used[i] = True
create_state_space_tree(snake_case__ ,snake_case__ ,index + 1 ,snake_case__ )
current_sequence.pop()
index_used[i] = False
UpperCamelCase = [3, 1, 2, 4]
generate_all_permutations(sequence)
UpperCamelCase = ['''A''', '''B''', '''C''']
generate_all_permutations(sequence_a)
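# The state-space tree enumerates all n! orderings, e.g. the two calls above print
# 4! == 24 permutations of [3, 1, 2, 4] and 3! == 6 permutations of ["A", "B", "C"].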
| 366 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
__snake_case : int = TransfoXLTokenizer
__snake_case : Tuple = False
__snake_case : List[Any] = False
def UpperCamelCase ( self: int ):
'''simple docstring'''
super().setUp()
_SCREAMING_SNAKE_CASE = [
"""<unk>""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""unwanted""",
"""wa""",
"""un""",
"""running""",
""",""",
"""low""",
"""l""",
]
_SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def UpperCamelCase ( self: Any , **UpperCAmelCase_: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[int] , UpperCAmelCase_: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = """<unk> UNwanted , running"""
_SCREAMING_SNAKE_CASE = """<unk> unwanted, running"""
return input_text, output_text
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = tokenizer.tokenize("""<unk> UNwanted , running""" )
self.assertListEqual(UpperCAmelCase_ , ["""<unk>""", """unwanted""", """,""", """running"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [0, 4, 8, 7] )
def UpperCamelCase ( self: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TransfoXLTokenizer(lower_case=UpperCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo ! how \n Are yoU ? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TransfoXLTokenizer(lower_case=UpperCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo ! how \n Are yoU ? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TransfoXLTokenizer(lower_case=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = """Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"""
_SCREAMING_SNAKE_CASE = [
"""Hello""",
"""(""",
"""bracket""",
""")""",
"""and""",
"""side""",
"""@-@""",
"""scrolled""",
"""[""",
"""and""",
"""]""",
"""Henry""",
"""'s""",
"""$""",
"""5""",
"""@,@""",
"""000""",
"""with""",
"""3""",
"""@.@""",
"""34""",
"""m""",
""".""",
"""What""",
"""'s""",
"""up""",
"""!""",
"""?""",
]
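# "@-@", "@,@" and "@.@" are the WikiText-style placeholders TransfoXLTokenizer inserts
# around intra-word hyphens and digit separators (and removes again when detokenizing).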
self.assertListEqual(tokenizer.tokenize(UpperCAmelCase_ ) , UpperCAmelCase_ )
self.assertEqual(tokenizer.convert_tokens_to_string(UpperCAmelCase_ ) , UpperCAmelCase_ )
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = len(UpperCAmelCase_ )
tokenizer.add_tokens(["""new1""", """new2"""] )
tokenizer.move_added_token("""new1""" , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(UpperCAmelCase_ ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode("""new1""" ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , """new1""" )
| 125 | 0 |
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class a ( __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase :Dict = WavaVecaPhonemeCTCTokenizer
lowerCamelCase :Optional[int] = False
def UpperCAmelCase ( self ) -> Optional[int]:
super().setUp()
_A = (
"""<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː """
"""ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː """
"""ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 """
"""oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ """
"""pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ """
"""yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ """
"""əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ """
"""ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ """
"""ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ """
"""uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ """
"""ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ """
"""ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ """
"""ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"""
).split(""" """ )
_A = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_A = {"""pad_token""": """<pad>""", """unk_token""": """<unk>""", """bos_token""": """<s>""", """eos_token""": """</s>"""}
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCAmelCase_ ) + """\n""" )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=20 , lowerCAmelCase_=5 ) -> Tuple[str, list]:
_A = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=lowerCAmelCase_ )) for i in range(len(lowerCAmelCase_ ) )]
_A = list(filter(lambda lowerCAmelCase_ : [t[0]] == tokenizer.encode(t[1] , do_phonemize=lowerCAmelCase_ ) , lowerCAmelCase_ ) )
if max_length is not None and len(lowerCAmelCase_ ) > max_length:
_A = toks[:max_length]
if min_length is not None and len(lowerCAmelCase_ ) < min_length and len(lowerCAmelCase_ ) > 0:
while len(lowerCAmelCase_ ) < min_length:
_A = toks + toks
# toks_str = [t[1] for t in toks]
_A = [t[0] for t in toks]
# Ensure consistency
_A = tokenizer.decode(lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ )
if " " not in output_txt and len(lowerCAmelCase_ ) > 1:
_A = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowerCAmelCase_ )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowerCAmelCase_ )
)
if with_prefix_space:
_A = """ """ + output_txt
_A = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
return output_txt, output_ids
def UpperCAmelCase ( self , **lowerCAmelCase_ ) -> Any:
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> List[str]:
_A = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
# check adding a single token
tokenizer.add_tokens("""xxx""" )
_A = tokenizer("""m xxx ɪ""" , do_phonemize=lowerCAmelCase_ ).input_ids
self.assertEqual(lowerCAmelCase_ , [13, 3_92, 17] ) # xxx should be last token
tokenizer.add_tokens(["""aaa""", """bbb""", """ccc"""] )
_A = tokenizer("""m aaa ɪ ccc""" , do_phonemize=lowerCAmelCase_ ).input_ids
self.assertEqual(lowerCAmelCase_ , [13, 3_93, 17, 3_95] ) # aaa and ccc should be after xxx and 2 after aaa
_A = tokenizer("""maɪ c""" , do_phonemize=lowerCAmelCase_ ).input_ids
self.assertEqual(lowerCAmelCase_ , [3, 2_00] ) # mai should be <unk> (=3)
def UpperCAmelCase ( self ) -> int:
_A = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
_A = """Hello how are you"""
_A = tokenizer.phonemize(lowerCAmelCase_ , phonemizer_lang="""en-us""" )
self.assertEqual(lowerCAmelCase_ , """h ə l oʊ h aʊ ɑːɹ j uː""" )
def UpperCAmelCase ( self ) -> List[str]:
_A = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
_A = """Hello how are you"""
_A = tokenizer.phonemize(lowerCAmelCase_ , phonemizer_lang="""en-us""" )
self.assertEqual(tokenizer(lowerCAmelCase_ ).input_ids , tokenizer(lowerCAmelCase_ , do_phonemize=lowerCAmelCase_ ).input_ids )
def UpperCAmelCase ( self ) -> List[str]:
_A = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
_A = """Hello how are you"""
_A = tokenizer.phonemize(lowerCAmelCase_ , phonemizer_lang="""en-us""" )
_A = tokenizer.decode(tokenizer(lowerCAmelCase_ ).input_ids )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Tuple:
_A = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
_A = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
_A = tokenizer.decode(sample_ids[0] )
_A = tokenizer.batch_decode(lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , batch_tokens[0] )
self.assertEqual(lowerCAmelCase_ , ["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] )
def UpperCAmelCase ( self ) -> str:
_A = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
_A = """Hello how are you"""
_A = tokenizer.phonemize(lowerCAmelCase_ , phonemizer_lang="""en-us""" )
self.assertEqual(lowerCAmelCase_ , """h ə l oʊ | h aʊ | ɑːɹ | j uː |""" )
def UpperCAmelCase ( self ) -> Tuple:
_A = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
_A = """Hello how are you"""
_A = tokenizer.phonemize(lowerCAmelCase_ , phonemizer_lang="""en-us""" )
self.assertEqual(tokenizer(lowerCAmelCase_ ).input_ids , tokenizer(lowerCAmelCase_ , do_phonemize=lowerCAmelCase_ ).input_ids )
def UpperCAmelCase ( self ) -> List[Any]:
_A = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
# fmt: off
_A = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
_A = tokenizer.decode(sample_ids[0] )
_A = tokenizer.batch_decode(lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , batch_tokens[0] )
self.assertEqual(lowerCAmelCase_ , ["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] )
# decode with no word_del_token filter
_A = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=lowerCAmelCase_ )
_A = tokenizer.batch_decode(lowerCAmelCase_ , filter_word_delimiter_token=lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , batch_tokens[0] )
self.assertEqual(lowerCAmelCase_ , ["""k s ɾ | ɾ l | ɭʲ""", """| j ð | s j ð s oːɹ"""] )
def UpperCAmelCase ( self ) -> Dict:
_A = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
_A = """Hello how are you"""
_A = tokenizer.phonemize(lowerCAmelCase_ , phonemizer_lang="""en-us""" )
_A = tokenizer.decode(tokenizer(lowerCAmelCase_ ).input_ids , filter_word_delimiter_token=lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Tuple:
_A = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
_A = """Hello how are you"""
_A = tokenizer.phonemize(lowerCAmelCase_ , phonemizer_lang="""en-us""" )
_A = tokenizer.decode(tokenizer(lowerCAmelCase_ ).input_ids , filter_word_delimiter_token=lowerCAmelCase_ )
self.assertEqual(""" """.join([p.strip() for p in phonemes.split(""" |""" )] ).strip() , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Tuple:
_A = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token=lowerCAmelCase_ )
_A = """Hello how are you"""
_A = tokenizer(lowerCAmelCase_ , phonemizer_lang="""en-us""" ).input_ids
_A = tokenizer(lowerCAmelCase_ , phonemizer_lang="""fr-fr""" ).input_ids
self.assertNotEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_A = tokenizer.decode(lowerCAmelCase_ )
_A = tokenizer.decode(lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , """h ə l oʊ h aʊ ɑːɹ j uː""" )
self.assertEqual(lowerCAmelCase_ , """ɛ l o h aʊ a ʁ j u""" )
def UpperCAmelCase ( self ) -> Any:
_A = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
_A = """Hello how Are you"""
_A = """hello how are you"""
_A = tokenizer(lowerCAmelCase_ ).input_ids
_A = tokenizer(lowerCAmelCase_ ).input_ids
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> List[str]:
_A = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
tokenizer.add_tokens(["""!""", """?"""] )
tokenizer.add_special_tokens({"""cls_token""": """$$$"""} )
# fmt: off
_A = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 3_92, 3_92, 3_93, 3_92, 3_92, 3_93, 3_94, 3_94],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 3_94, 3_94],
]
# fmt: on
_A = tokenizer.batch_decode(lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , ["""k s ɾ ɾ l ɭʲ!?!? $$$""", """j ð s j ð s oːɹ $$$"""] )
@staticmethod
def UpperCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
_A = [d[key] for d in offsets]
return retrieved_list
def UpperCAmelCase ( self ) -> Tuple:
_A = self.get_tokenizer(word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
_A = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
_A = tokenizer.decode(lowerCAmelCase_ , output_char_offsets=lowerCAmelCase_ , filter_word_delimiter_token=lowerCAmelCase_ )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""char_offsets""" in outputs )
self.assertTrue(isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""char_offsets"""] , """char""" ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """char""" ) , ["""k""", """s""", """ɾ""", """ɾ""", """|""", """ɾ""", """l""", """|""", """ɭʲ"""] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """start_offset""" ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """end_offset""" ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def UpperCAmelCase ( self ) -> Optional[int]:
_A = self.get_tokenizer(word_delimiter_token="""|""" )
def check_list_tuples_equal(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertTrue(isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) )
self.assertTrue(isinstance(outputs_list[0] , lowerCAmelCase_ ) )
# transform list to ModelOutput
_A = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch["""text"""] , outputs_batch_a["""text"""] )
def recursive_check(lowerCAmelCase_ , lowerCAmelCase_ ):
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
[recursive_check(lowerCAmelCase_ , lowerCAmelCase_ ) for la, la in zip(lowerCAmelCase_ , lowerCAmelCase_ )]
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch["""char_offsets"""] , outputs_batch_a["""char_offsets"""] )
# fmt: off
_A = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
_A = tokenizer.batch_decode(lowerCAmelCase_ , output_char_offsets=lowerCAmelCase_ )
_A = [tokenizer.decode(lowerCAmelCase_ , output_char_offsets=lowerCAmelCase_ ) for ids in sample_ids]
check_list_tuples_equal(lowerCAmelCase_ , lowerCAmelCase_ )
@unittest.skip("""Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes""" )
def UpperCAmelCase ( self ) -> int:
pass
@unittest.skip("""Wav2Vec2PhonemeTokenizer always puts spaces between phonemes""" )
def UpperCAmelCase ( self ) -> List[str]:
pass
@unittest.skip("""encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency""" )
def UpperCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip("""Wav2Vec2PhonemeModel has no max model length => no testing""" )
def UpperCAmelCase ( self ) -> Optional[int]:
pass
def UpperCAmelCase ( self ) -> List[Any]:
_A = self.get_tokenizers(do_lower_case=lowerCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_A = tokenizer.vocab_size
_A = len(lowerCAmelCase_ )
self.assertNotEqual(lowerCAmelCase_ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
_A = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
_A = tokenizer.add_tokens(lowerCAmelCase_ )
_A = tokenizer.vocab_size
_A = len(lowerCAmelCase_ )
self.assertNotEqual(lowerCAmelCase_ , 0 )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , len(lowerCAmelCase_ ) )
self.assertEqual(lowerCAmelCase_ , all_size + len(lowerCAmelCase_ ) )
_A = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=lowerCAmelCase_ )
self.assertGreaterEqual(len(lowerCAmelCase_ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
_A = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
_A = tokenizer.add_special_tokens(lowerCAmelCase_ )
_A = tokenizer.vocab_size
_A = len(lowerCAmelCase_ )
self.assertNotEqual(lowerCAmelCase_ , 0 )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , len(lowerCAmelCase_ ) )
self.assertEqual(lowerCAmelCase_ , all_size_a + len(lowerCAmelCase_ ) )
_A = tokenizer.encode(
""">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=lowerCAmelCase_ )
self.assertGreaterEqual(len(lowerCAmelCase_ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" )
def UpperCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" )
def UpperCAmelCase ( self ) -> Union[str, Any]:
pass
def UpperCAmelCase ( self ) -> str:
# The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
# is not the case for Wav2Vec2PhonemeCTCTokenizer.
_A = self.get_tokenizers(fast=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_A = ["""ð""", """ɪ""", """s""", """ɪ""", """z""", """ɐ""", """t""", """ɛ""", """k""", """s""", """t"""]
_A = tokenizer.convert_tokens_to_string(lowerCAmelCase_ )
self.assertIsInstance(output["""text"""] , lowerCAmelCase_ )
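# Follow-up sketch (assumed model stride, not part of the test suite): the char offsets above
# are frame indices, so wall-clock times need the encoder's downsampling ratio, e.g. for Wav2Vec2
#   time_in_s = offset * 320 / 16_000  # 320-sample hop at 16 kHz -> 20 ms per frame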
| 180 |
import collections
import importlib.util
import os
import re
from pathlib import Path
_SCREAMING_SNAKE_CASE = 'src/transformers'
# Matches is_xxx_available()
_SCREAMING_SNAKE_CASE = re.compile(R'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
_SCREAMING_SNAKE_CASE = re.compile(R'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_SCREAMING_SNAKE_CASE = re.compile(R'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_SCREAMING_SNAKE_CASE = re.compile(R'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
_SCREAMING_SNAKE_CASE = re.compile(R'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_SCREAMING_SNAKE_CASE = re.compile(R'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
_SCREAMING_SNAKE_CASE = re.compile(R'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_SCREAMING_SNAKE_CASE = re.compile(R'^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
_SCREAMING_SNAKE_CASE = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
_SCREAMING_SNAKE_CASE = re.compile(R'^\s*try:')
# Catches a line with else:
_SCREAMING_SNAKE_CASE = re.compile(R'^\s*else:')
def snake_case ( snake_case__ :Optional[Any]) -> List[str]:
if _re_test_backend.search(snake_case__) is None:
return None
_A = [b[0] for b in _re_backend.findall(snake_case__)]
backends.sort()
return "_and_".join(snake_case__)
def snake_case ( snake_case__ :Any) -> Any:
with open(snake_case__ , """r""" , encoding="""utf-8""" , newline="""\n""") as f:
_A = f.readlines()
_A = 0
while line_index < len(snake_case__) and not lines[line_index].startswith("""_import_structure = {"""):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(snake_case__):
return None
# First grab the objects without a specific backend in _import_structure
_A = []
while not lines[line_index].startswith("""if TYPE_CHECKING""") and find_backend(lines[line_index]) is None:
_A = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(snake_case__):
_A = _re_one_line_import_struct.search(snake_case__).groups()[0]
_A = re.findall(R"""\[([^\]]+)\]""" , snake_case__)
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(""", """)])
line_index += 1
continue
_A = _re_import_struct_key_value.search(snake_case__)
if single_line_import_search is not None:
_A = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """) if len(snake_case__) > 0]
objects.extend(snake_case__)
elif line.startswith(""" """ * 8 + """\""""):
objects.append(line[9:-3])
line_index += 1
_A = {"""none""": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("""if TYPE_CHECKING"""):
# If the line is an if not is_backend_available, we grab all objects associated.
_A = find_backend(lines[line_index])
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1]) is None:
_A = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index]) is None:
line_index += 1
line_index += 1
_A = []
# Until we unindent, add backend objects to the list
while len(lines[line_index]) <= 1 or lines[line_index].startswith(""" """ * 4):
_A = lines[line_index]
if _re_import_struct_add_one.search(snake_case__) is not None:
objects.append(_re_import_struct_add_one.search(snake_case__).groups()[0])
elif _re_import_struct_add_many.search(snake_case__) is not None:
_A = _re_import_struct_add_many.search(snake_case__).groups()[0].split(""", """)
_A = [obj[1:-1] for obj in imports if len(snake_case__) > 0]
objects.extend(snake_case__)
elif _re_between_brackets.search(snake_case__) is not None:
_A = _re_between_brackets.search(snake_case__).groups()[0].split(""", """)
_A = [obj[1:-1] for obj in imports if len(snake_case__) > 0]
objects.extend(snake_case__)
elif _re_quote_object.search(snake_case__) is not None:
objects.append(_re_quote_object.search(snake_case__).groups()[0])
elif line.startswith(""" """ * 8 + """\""""):
objects.append(line[9:-3])
elif line.startswith(""" """ * 12 + """\""""):
objects.append(line[13:-3])
line_index += 1
_A = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
_A = []
while (
line_index < len(snake_case__)
and find_backend(lines[line_index]) is None
and not lines[line_index].startswith("""else""")
):
_A = lines[line_index]
_A = _re_import.search(snake_case__)
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """))
elif line.startswith(""" """ * 8):
objects.append(line[8:-2])
line_index += 1
_A = {"""none""": objects}
# Let's continue with backend-specific objects
while line_index < len(snake_case__):
# If the line is an if is_backend_available, we grab all objects associated.
_A = find_backend(lines[line_index])
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1]) is None:
_A = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index]) is None:
line_index += 1
line_index += 1
_A = []
# Until we unindent, add backend objects to the list
while len(lines[line_index]) <= 1 or lines[line_index].startswith(""" """ * 8):
_A = lines[line_index]
_A = _re_import.search(snake_case__)
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """))
elif line.startswith(""" """ * 12):
objects.append(line[12:-2])
line_index += 1
_A = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two halves of an init and return a list of mismatch descriptions."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Check all inits in the repo and raise an error if both halves do not define the same objects."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of Transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 180 | 1 |
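The row above restores Transformers' init-consistency checker. As a quick, self-contained illustration of how its backend-detection regexes behave (a minimal sketch, not part of the dataset row; the sample lines below are made up), the snippet can be run on its own:

import re

# Same patterns as in the checker above; `is_xxx_available()` contains a literal "()",
# which the regex treats as an extra empty capture group -- hence `b[0]` below.
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")


def find_backend(line):
    if _re_test_backend.search(line) is None:
        return None
    return "_and_".join(sorted(b[0] for b in _re_backend.findall(line)))


# Hypothetical init lines: one guarded by two backends, one ordinary statement.
print(find_backend("    if not is_torch_available() and not is_vision_available():"))  # torch_and_vision
print(find_backend("    x = 1"))  # None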
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        backbone_featmap_shape=[1, 16, 4, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape

        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            backbone_featmap_shape=self.backbone_featmap_shape,
            backbone_config=backbone_config,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")

        image = prepare_img()

        inputs = image_processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()

        # the original line used assertTrue with a message argument, which always passes;
        # assertEqual performs the intended comparison
        self.assertEqual(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
| 364 |
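A quick arithmetic check of the sequence length the ViT-hybrid tester above expects (a standalone sketch, not part of the dataset row): with the default `image_size=64` and a backbone output stride of 32, the feature map is 2x2, so 4 patches plus the [CLS] token give 5 positions.

# Standalone check of the ViT-hybrid token count used by the tester above.
image_size = 64
num_patches = (image_size // 32) ** 2  # 2 * 2 = 4 patches from the stride-32 feature map
seq_length = num_patches + 1  # +1 for the [CLS] token
assert (num_patches, seq_length) == (4, 5)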
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """
    Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint.
    """
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch. In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""
class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the transformers CLI.
        """
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self):
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            if "ckpt" in self._tf_checkpoint.lower():
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ""
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]"
            )
| 179 | 0 |
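To make the subcommand wiring in `ConvertCommand` above concrete, here is a minimal standalone sketch of the same argparse pattern (all names here are illustrative, not transformers APIs): a command registers a subparser and stores a factory in `func`, which the entry point later calls.

from argparse import ArgumentParser


class EchoCommand:
    @staticmethod
    def register_subcommand(parser):
        sub = parser.add_parser("echo", help="Print a message.")
        sub.add_argument("--message", type=str, required=True)
        sub.set_defaults(func=lambda args: EchoCommand(args.message))

    def __init__(self, message):
        self._message = message

    def run(self):
        print(self._message)


root = ArgumentParser("demo-cli")
EchoCommand.register_subcommand(root.add_subparsers())
args = root.parse_args(["echo", "--message", "hi"])
args.func(args).run()  # prints: hi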
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
'''Minangkabau Arabic ''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES

    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
| 33 |
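For context, a hypothetical usage sketch of the tool defined above (assuming the restored class name `TranslationTool` and the transformers-4.30-era `transformers.tools` entry point; running it downloads the NLLB checkpoint):

from transformers.tools import TranslationTool

translator = TranslationTool()
print(translator("Bonjour, le monde !", src_lang="French", tgt_lang="English"))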
"""simple docstring"""
def lowercase ( __snake_case : int = 1_0_0 ):
lowercase_ : str = 0
lowercase_ : List[Any] = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(F"""{solution() = }""")
| 33 | 1 |
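As a sanity check on the brute-force loop above (a standalone sketch): the same quantity has a closed form, since sum(1..n) = n(n+1)/2 and the sum of squares is n(n+1)(2n+1)/6.

def solution_closed_form(n: int = 100) -> int:
    # (sum of 1..n)**2 minus the sum of squares of 1..n, via the standard formulas
    sum_of_ints = n * (n + 1) // 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_of_ints**2 - sum_of_squares


assert solution_closed_form(10) == 2640  # 55**2 - 385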
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
| 363 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)

    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: head masking not yet implemented for LED
        pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4


@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| 204 | 0 |
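One detail worth isolating from `prepare_led_inputs_dict` above: the attention mask is just a pad-token comparison. A standalone toy check (not part of the test file):

import tensorflow as tf

pad_token_id = 1
input_ids = tf.constant([[0, 5, 7, 1, 1]])
attention_mask = tf.cast(tf.math.not_equal(input_ids, pad_token_id), tf.int8)
print(attention_mask.numpy())  # [[1 1 1 0 0]] -- padding positions are masked out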
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
| 78 |
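A minimal usage sketch for the config above (sizes here are illustrative, far smaller than the 4096-wide defaults; `RwkvModel` is the matching transformers model class):

from transformers import RwkvConfig, RwkvModel

config = RwkvConfig(vocab_size=1000, context_length=256, hidden_size=64, num_hidden_layers=2)
model = RwkvModel(config)  # randomly initialized
print(config.max_position_embeddings)  # 256 -- routed to `context_length` via attribute_map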
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def _lowerCAmelCase ( lowercase_ = "isbn/0140328726" ):
UpperCAmelCase = olid.strip().strip('/' ) # Remove leading/trailing whitespace & slashes
if new_olid.count('/' ) != 1:
UpperCAmelCase = F"""{olid} is not a valid Open Library olid"""
raise ValueError(lowercase_ )
return requests.get(F"""https://openlibrary.org/{new_olid}.json""" ).json()
def _lowerCAmelCase ( lowercase_ ):
UpperCAmelCase = {
'title': 'Title',
'publish_date': 'Publish date',
'authors': 'Authors',
'number_of_pages': 'Number of pages:',
'first_sentence': 'First sentence',
'isbn_10': 'ISBN (10)',
'isbn_13': 'ISBN (13)',
}
UpperCAmelCase = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
UpperCAmelCase = [
get_openlibrary_data(author['key'] )['name'] for author in data['Authors']
]
UpperCAmelCase = data['First sentence']['value']
for key, value in data.items():
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase = ', '.join(lowercase_ )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
snake_case_ = input("""\nEnter the ISBN code to search (or 'quit' to stop): """).strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
continue
print(f'''\nSearching Open Library for ISBN: {isbn}...\n''')
try:
snake_case_ = summarize_book(get_openlibrary_data(f'''isbn/{isbn}'''))
print("""\n""".join(f'''{key}: {value}''' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f'''Sorry, there are no results for ISBN: {isbn}.''')
| 78 | 1 |
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a_ :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=30 , _lowerCamelCase=2 , _lowerCamelCase=3 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=32 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=10 , _lowerCamelCase=0.0_2 , _lowerCamelCase=3 , _lowerCamelCase=0.6 , _lowerCamelCase=None , ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : int = parent
SCREAMING_SNAKE_CASE : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE : List[str] = image_size
SCREAMING_SNAKE_CASE : Optional[int] = patch_size
SCREAMING_SNAKE_CASE : str = num_channels
SCREAMING_SNAKE_CASE : List[Any] = is_training
SCREAMING_SNAKE_CASE : Any = use_labels
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : str = num_hidden_layers
SCREAMING_SNAKE_CASE : List[str] = num_attention_heads
SCREAMING_SNAKE_CASE : List[str] = intermediate_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE : int = mask_ratio
SCREAMING_SNAKE_CASE : List[str] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE : Tuple = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE : Union[str, Any] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self ) ->Any:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
    def __lowerCAmelCase ( self , config , pixel_values , labels ) ->Any:
        model = ViTMAEModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def __lowerCAmelCase ( self , config , pixel_values , labels ) ->Optional[Any]:
        model = ViTMAEForPreTraining(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def __lowerCAmelCase ( self ) ->Optional[int]:
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class ViTMAEModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE : Union[str, Any] = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
__SCREAMING_SNAKE_CASE : Any = False
__SCREAMING_SNAKE_CASE : str = False
__SCREAMING_SNAKE_CASE : Union[str, Any] = False
__SCREAMING_SNAKE_CASE : Union[str, Any] = False
def __lowerCAmelCase ( self ) ->int:
        self.model_tester = ViTMAEModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTMAEConfig , has_text_modality=False , hidden_size=37 )
def __lowerCAmelCase ( self ) ->Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def __lowerCAmelCase ( self ) ->Tuple:
pass
def __lowerCAmelCase ( self ) ->Union[str, Any]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def __lowerCAmelCase ( self ) ->str:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def __lowerCAmelCase ( self ) ->Tuple:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def __lowerCAmelCase ( self ) ->Any:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    def check_pt_tf_models ( self , tf_model , pt_model , pt_inputs_dict ) ->Any:
        # make masks reproducible
        np.random.seed(2 )
        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        pt_noise = torch.from_numpy(noise )
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict['''noise'''] = pt_noise
        super().check_pt_tf_models(tf_model , pt_model , pt_inputs_dict )
def __lowerCAmelCase ( self ) ->List[Any]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2 )
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            out_a = outputs[0].cpu().numpy()
            out_a[np.isnan(out_a )] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname )
                model = model_class.from_pretrained(tmpdirname )
                model.to(torch_device )
                # make random mask reproducible
                torch.manual_seed(2 )
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
# Make sure we don't have nans
                out_b = after_outputs[0].cpu().numpy()
                out_b[np.isnan(out_b )] = 0
                max_diff = np.amax(np.abs(out_a - out_b ) )
                self.assertLessEqual(max_diff , 1e-5 )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def __lowerCAmelCase ( self ) ->Any:
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def __lowerCAmelCase ( self ) ->Any:
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def __lowerCAmelCase ( self ) ->Tuple:
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def __lowerCAmelCase ( self ) ->int:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowerCAmelCase ( self ) ->List[str]:
pass
@slow
def __lowerCAmelCase ( self ) ->Optional[Any]:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor ( self ) ->Tuple:
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def __lowerCAmelCase ( self ) ->Union[str, Any]:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
        model = ViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
        noise = np.random.uniform(size=(1, num_patches) )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs , noise=torch.from_numpy(noise ).to(device=torch_device ) )
        # verify the logits
        expected_shape = torch.Size((1, 196, 768) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(torch_device ) , atol=1e-4 ) )
| 19 |
import datasets
from .evaluate import evaluate
_CITATION = '''\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
'''
_DESCRIPTION = '''
This metric wraps the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
'''
_KWARGS_DESCRIPTION = '''
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the CUAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
\'aupr\': Area Under the Precision-Recall curve
\'prec_at_80_recall\': Precision at 80% recall
\'prec_at_90_recall\': Precision at 90% recall
Examples:
>>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> cuad_metric = datasets.load_metric("cuad")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD ( datasets.Metric ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {
'''id''': datasets.Value('''string''' ),
'''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ),
},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ) , codebase_urls=['''https://www.atticusprojectai.org/cuad'''] , reference_urls=['''https://www.atticusprojectai.org/cuad'''] , )
    def __lowerCAmelCase ( self , predictions , references ) ->Tuple:
        pred_dict = {prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions}
        dataset = [
{
'''paragraphs''': [
{
'''qas''': [
{
'''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
'''id''': ref['''id'''],
}
for ref in references
]
}
]
}
]
        score = evaluate(dataset=dataset , predictions=pred_dict )
return score
| 19 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class TextSummarizationTool ( PipelineTool ):
lowerCAmelCase__ = 'philschmid/bart-large-cnn-samsum'
lowerCAmelCase__ = (
'This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '
'and returns a summary of the text.'
)
lowerCAmelCase__ = 'summarizer'
lowerCAmelCase__ = AutoTokenizer
lowerCAmelCase__ = AutoModelForSeqaSeqLM
lowerCAmelCase__ = ['text']
lowerCAmelCase__ = ['text']
def lowercase_ ( self : int , _A : List[Any] ):
'''simple docstring'''
        return self.pre_processor(_A , return_tensors='''pt''' , truncation=True )
def lowercase_ ( self : List[str] , _A : Optional[Any] ):
'''simple docstring'''
return self.model.generate(**_A )[0]
def lowercase_ ( self : Optional[int] , _A : Tuple ):
'''simple docstring'''
        return self.pre_processor.decode(_A , skip_special_tokens=True , clean_up_tokenization_spaces=True )
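# Minimal usage sketch (illustrative only; it assumes the agents/tools runtime is
# available and that PipelineTool instances are directly callable):
# tool = TextSummarizationTool()
# print(tool("Some long English text to condense into a short summary ..."))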
| 181 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/bart-base''': 1_0_2_4,
'''facebook/bart-large''': 1_0_2_4,
'''facebook/bart-large-mnli''': 1_0_2_4,
'''facebook/bart-large-cnn''': 1_0_2_4,
'''facebook/bart-large-xsum''': 1_0_2_4,
'''yjernite/bart_eli5''': 1_0_2_4,
}
@lru_cache()
def bytes_to_unicode() -> List[Any]:
    bs = (
        list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
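# Sanity-check example (a known property of this GPT-2-style byte table, added here
# for illustration): the space byte is remapped to a printable stand-in, i.e.
# bytes_to_unicode()[ord(" ")] == "\u0120" (rendered as "Ġ").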
def get_pairs( word ) -> Union[str, Any]:
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
return pairs
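# Illustrative example: get_pairs(("h", "e", "l", "l", "o"))
# -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}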
class BartTokenizer ( PreTrainedTokenizer ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ['input_ids', 'attention_mask']
    def __init__( self : Optional[int] , vocab_file : Optional[int] , merges_file : List[Any] , errors : int="replace" , bos_token : List[Any]="<s>" , eos_token : List[Any]="</s>" , sep_token : List[Any]="</s>" , cls_token : Optional[int]="<s>" , unk_token : List[str]="<unk>" , pad_token : List[str]="<pad>" , mask_token : Union[str, Any]="<mask>" , add_prefix_space : Any=False , **kwargs : Dict , ):
        '''simple docstring'''
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding='''utf-8''' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding='''utf-8''' ) as merges_handle:
            bpe_merges = merges_handle.read().split('''\n''' )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
def lowercase_ ( self : int ):
'''simple docstring'''
return len(self.encoder )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
    def bpe ( self : List[Any] , token : Tuple ):
        '''simple docstring'''
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ''' '''.join(word )
        self.cache[token] = word
        return word
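    # Illustrative walk-through (not from the original file): if ("l", "o") were the
    # highest-ranked merge in bpe_ranks, bpe("hello") would rewrite
    # ("h", "e", "l", "l", "o") into ("h", "e", "l", "lo"), keep merging while a
    # known pair remains, and finally return the space-joined symbols, e.g. "h e l lo".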
    def _tokenize ( self : str , text : str ):
        '''simple docstring'''
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = ''''''.join(
                self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(''' ''' ) )
return bpe_tokens
def lowercase_ ( self : List[str] , _A : Any ):
'''simple docstring'''
return self.encoder.get(_A , self.encoder.get(self.unk_token ) )
def lowercase_ ( self : int , _A : List[str] ):
'''simple docstring'''
return self.decoder.get(_A )
    def lowercase_ ( self : Tuple , tokens : Any ):
        '''simple docstring'''
        text = ''''''.join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
    def lowercase_ ( self : int , save_directory : str , filename_prefix : Optional[str] = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '''\n''' )
        index = 0
        with open(merge_file , '''w''' , encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        ''' Please check that the tokenizer is not corrupted!''' )
                    index = token_index
                writer.write(''' '''.join(bpe_tokens ) + '''\n''' )
                index += 1
return vocab_file, merge_file
def lowercase_ ( self : str , _A : List[int] , _A : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
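    # The two branches above emit BART's documented formats: ``<s> A </s>`` for a
    # single sequence and ``<s> A </s></s> B </s>`` for a sequence pair.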
def lowercase_ ( self : Optional[int] , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A )
if token_ids_a is None:
return [1] + ([0] * len(_A )) + [1]
return [1] + ([0] * len(_A )) + [1, 1] + ([0] * len(_A )) + [1]
def lowercase_ ( self : Dict , _A : List[int] , _A : Optional[List[int]] = None ):
'''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def lowercase_ ( self : Optional[Any] , text : Any , is_split_into_words : Dict=False , **kwargs : Union[str, Any] ):
        '''simple docstring'''
        add_prefix_space = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = ''' ''' + text
return (text, kwargs)
| 181 | 1 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = """pt"""
elif is_tf_available():
    FRAMEWORK = """tf"""
else:
    FRAMEWORK = """jax"""
class PerceiverTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = PerceiverTokenizer
__UpperCamelCase = False
    def setUp( self :Optional[int] ) -> Union[str, Any]:
        super().setUp()
        tokenizer = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
    def perceiver_tokenizer( self :str ) -> List[str]:
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def UpperCAmelCase__ ( self :Union[str, Any] , **lowercase_ :Any ) -> str:
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_snake_case )
    def get_clean_sequence( self :str , tokenizer :List[str] , with_prefix_space :List[Any]=False , max_length :int=20 , min_length :int=5 ) -> List[Any]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer ) ):
            try:
                tok = tokenizer.decode([i] , clean_up_tokenization_spaces=False )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        toks = list(filter(lambda t : re.match(R'^[ a-zA-Z]+$' , t[1] ) , toks ) )
        toks = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=False ) , toks ) )
        if max_length is not None and len(toks ) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks ) < min_length and len(toks ) > 0:
            while len(toks ) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids , clean_up_tokenization_spaces=False )
        if " " not in output_txt and len(toks_ids ) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=False )
                + " "
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=False )
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt , add_special_tokens=False )
        return output_txt, output_ids
def UpperCAmelCase__ ( self :List[Any] ) -> int:
UpperCAmelCase = self.perceiver_tokenizer
UpperCAmelCase = "Unicode €."
UpperCAmelCase = tokenizer(_snake_case )
UpperCAmelCase = [4, 91, 1_16, 1_11, 1_05, 1_17, 1_06, 1_07, 38, 2_32, 1_36, 1_78, 52, 5]
self.assertEqual(encoded['input_ids'] , _snake_case )
# decoding
UpperCAmelCase = tokenizer.decode(_snake_case )
self.assertEqual(_snake_case , '[CLS]Unicode €.[SEP]' )
UpperCAmelCase = tokenizer('e è é ê ë' )
UpperCAmelCase = [4, 1_07, 38, 2_01, 1_74, 38, 2_01, 1_75, 38, 2_01, 1_76, 38, 2_01, 1_77, 5]
self.assertEqual(encoded['input_ids'] , _snake_case )
# decoding
UpperCAmelCase = tokenizer.decode(_snake_case )
self.assertEqual(_snake_case , '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
def UpperCAmelCase__ ( self :Dict ) -> Union[str, Any]:
UpperCAmelCase = self.perceiver_tokenizer
UpperCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
# fmt: off
UpperCAmelCase = [4, 71, 38, 1_14, 1_17, 1_16, 1_09, 38, 1_18, 1_03, 1_20, 1_03, 1_09, 1_20, 1_03, 1_18, 1_10, 38, 1_08, 1_17, 1_20, 38, 1_21, 1_23, 1_15, 1_15, 1_03, 1_20, 1_11, 1_28, 1_03, 1_22, 1_11, 1_17, 1_16, 52, 5, 0]
# fmt: on
UpperCAmelCase = tokenizer(_snake_case , padding=_snake_case , return_tensors=_snake_case )
self.assertIsInstance(_snake_case , _snake_case )
if FRAMEWORK != "jax":
UpperCAmelCase = list(batch.input_ids.numpy()[0] )
else:
UpperCAmelCase = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_snake_case , _snake_case )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def UpperCAmelCase__ ( self :List[str] ) -> Optional[int]:
UpperCAmelCase = self.perceiver_tokenizer
UpperCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
UpperCAmelCase = tokenizer(_snake_case , padding=_snake_case , return_tensors=_snake_case )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , _snake_case )
self.assertIn('attention_mask' , _snake_case )
self.assertNotIn('decoder_input_ids' , _snake_case )
self.assertNotIn('decoder_attention_mask' , _snake_case )
def UpperCAmelCase__ ( self :Union[str, Any] ) -> Dict:
UpperCAmelCase = self.perceiver_tokenizer
UpperCAmelCase = [
"Summary of the text.",
"Another summary.",
]
UpperCAmelCase = tokenizer(
text_target=_snake_case , max_length=32 , padding='max_length' , truncation=_snake_case , return_tensors=_snake_case )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def UpperCAmelCase__ ( self :int ) -> Union[str, Any]:
# safety check on max_len default value so we are sure the test works
UpperCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
UpperCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = " He is very happy, UNwant\u00E9d,running"
UpperCAmelCase = tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
tokenizer.save_pretrained(_snake_case )
UpperCAmelCase = tokenizer.__class__.from_pretrained(_snake_case )
UpperCAmelCase = after_tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
self.assertListEqual(_snake_case , _snake_case )
shutil.rmtree(_snake_case )
UpperCAmelCase = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = " He is very happy, UNwant\u00E9d,running"
tokenizer.add_tokens(['bim', 'bambam'] )
UpperCAmelCase = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
UpperCAmelCase = tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
tokenizer.save_pretrained(_snake_case )
UpperCAmelCase = tokenizer.__class__.from_pretrained(_snake_case )
UpperCAmelCase = after_tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
self.assertListEqual(_snake_case , _snake_case )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
UpperCAmelCase = tokenizer.__class__.from_pretrained(_snake_case , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_snake_case )
def UpperCAmelCase__ ( self :List[str] ) -> Dict:
UpperCAmelCase = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_snake_case )
with open(os.path.join(_snake_case , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
UpperCAmelCase = json.load(_snake_case )
with open(os.path.join(_snake_case , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
UpperCAmelCase = json.load(_snake_case )
UpperCAmelCase = [f"""<extra_id_{i}>""" for i in range(1_25 )]
UpperCAmelCase = added_tokens_extra_ids + [
"an_additional_special_token"
]
UpperCAmelCase = added_tokens_extra_ids + [
"an_additional_special_token"
]
with open(os.path.join(_snake_case , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_snake_case , _snake_case )
with open(os.path.join(_snake_case , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_snake_case , _snake_case )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
UpperCAmelCase = tokenizer_class.from_pretrained(
_snake_case , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
UpperCAmelCase = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_snake_case )]
UpperCAmelCase = tokenizer_class.from_pretrained(
_snake_case , additional_special_tokens=_snake_case , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def UpperCAmelCase__ ( self :List[str] ) -> List[Any]:
UpperCAmelCase = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_78] ) , '�' )
def UpperCAmelCase__ ( self :Optional[int] ) -> Dict:
pass
def UpperCAmelCase__ ( self :List[Any] ) -> List[str]:
pass
def UpperCAmelCase__ ( self :Union[str, Any] ) -> Any:
pass
def UpperCAmelCase__ ( self :int ) -> Optional[int]:
pass
def UpperCAmelCase__ ( self :Dict ) -> Tuple:
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
UpperCAmelCase = self.get_tokenizers(fast=_snake_case , do_lower_case=_snake_case )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
UpperCAmelCase = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
UpperCAmelCase = tokenizer.convert_tokens_to_string(_snake_case )
self.assertIsInstance(_snake_case , _snake_case )
| 351 |
"""simple docstring"""
def _lowerCAmelCase ( number ):
    if not isinstance(number , int ):
        msg = F"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg )
    if number < 1:
        msg = F"""Input value of [number={number}] must be > 0"""
        raise ValueError(msg )
    current_number = 1
    for i in range(1 , number ):
        current_number *= 4 * i - 2
        current_number //= i + 1
return current_number
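# Worked example (illustrative): the loop applies the Catalan-number recurrence
# C(i+1) = C(i) * (4*i - 2) / (i + 1), so the function yields 1, 1, 2, 5, 14, 42, ...
# for number = 1, 2, 3, 4, 5, 6; e.g. _lowerCAmelCase(5) == 14.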
if __name__ == "__main__":
import doctest
doctest.testmod()
| 181 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'''configuration_deit''': ['''DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DeiTConfig''', '''DeiTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_deit'''] = ['''DeiTFeatureExtractor''']
    _import_structure['''image_processing_deit'''] = ['''DeiTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_deit'''] = [
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_deit'''] = [
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
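    # With the lazy module installed above, names such as DeiTModel are resolved on
    # first attribute access instead of being imported eagerly at package import time.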
| 19 |
import math
def res ( x , y ):
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError("This should never happen" )
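# Worked example (illustrative): res(2, 100) = 100 * log10(2) ~ 30.10 while
# res(10, 30) = 30, so 2**100 > 10**30 can be decided without computing either power.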
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
    prompt = '''Enter the base and the power separated by a comma: '''
    xa, ya = map(int, input(prompt).split(''','''))
    xb, yb = map(int, input(prompt).split(''','''))
    # We find the log of each number, using the function res(), which takes two
    # arguments.
    resa = res(xa, ya)
    resb = res(xb, yb)
# We check for the largest number
    if resa > resb:
        print('''Largest number is''', xa, '''^''', ya)
    elif resb > resa:
        print('''Largest number is''', xb, '''^''', yb)
    else:
        print('''Both are equal''')
| 19 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
a : int = logging.get_logger(__name__)
class MobileViTFeatureExtractor( MobileViTImageProcessor ):
"""simple docstring"""
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 360 |
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 128,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 50,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 10,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 10,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
@classmethod
def UpperCAmelCase_ ( cls ):
'''simple docstring'''
lowercase__ : Union[str, Any]= TOKEN
HfFolder.save_token(snake_case__ )
@classmethod
def UpperCAmelCase_ ( cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-config" )
except HTTPError:
pass
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Union[str, Any]= BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("test-config" , use_auth_token=self._token )
lowercase__ : List[Any]= BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(snake_case__ , repo_id="test-config" , push_to_hub=snake_case__ , use_auth_token=self._token )
lowercase__ : int= BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[int]= BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token )
lowercase__ : Optional[Any]= BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
snake_case__ , repo_id="valid_org/test-config-org" , push_to_hub=snake_case__ , use_auth_token=self._token )
lowercase__ : Any= BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
lowercase__ : Union[str, Any]= CustomConfig(attribute=42 )
config.push_to_hub("test-dynamic-config" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} )
lowercase__ : List[str]= AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=snake_case__ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , "CustomConfig" )
self.assertEqual(new_config.attribute , 42 )
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowercase__ : str= c.n_embd + 1 # int
lowercase__ : Tuple= c.resid_pdrop + 1.0 # float
lowercase__ : Union[str, Any]= not c.scale_attn_weights # bool
lowercase__ : Optional[Any]= c.summary_type + "foo" # str
c.update_from_string(
F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(snake_case__ , c.n_embd , "mismatch for key: n_embd" )
self.assertEqual(snake_case__ , c.resid_pdrop , "mismatch for key: resid_pdrop" )
self.assertEqual(snake_case__ , c.scale_attn_weights , "mismatch for key: scale_attn_weights" )
self.assertEqual(snake_case__ , c.summary_type , "mismatch for key: summary_type" )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= PretrainedConfig()
lowercase__ : List[str]= [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
snake_case__ , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
lowercase__ : Tuple= [key for key, value in config_common_kwargs.items() if value == getattr(snake_case__ , snake_case__ )]
if len(snake_case__ ) > 0:
raise ValueError(
"The following keys are set with the default values in"
" `test_configuration_common.config_common_kwargs` pick another value for them:"
F''' {', '.join(snake_case__ )}.''' )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
with self.assertRaises(snake_case__ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowercase__ : Optional[int]= BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
lowercase__ : Optional[Any]= BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" )
self.assertIsNotNone(snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
# A mock response for an HTTP head request to emulate server down
lowercase__ : str= mock.Mock()
lowercase__ : Optional[Any]= 500
lowercase__ : Any= {}
lowercase__ : Tuple= HTTPError
lowercase__ : List[Any]= {}
# Download this model to make sure it's in the cache.
lowercase__ : Any= BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=snake_case__ ) as mock_head:
lowercase__ : Any= BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCAmelCase_ ( self ):
'''simple docstring'''
# This test is for deprecated behavior and can be removed in v5
lowercase__ : Optional[Any]= BertConfig.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : int= AutoConfig.from_pretrained("bert-base-cased" )
lowercase__ : Optional[int]= ["config.4.0.0.json"]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(snake_case__ )
lowercase__ : List[Any]= 2
json.dump(configuration.to_dict() , open(os.path.join(snake_case__ , "config.4.0.0.json" ) , "w" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowercase__ : int= AutoConfig.from_pretrained(snake_case__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowercase__ : Optional[int]= ["config.42.0.0.json"]
lowercase__ : int= 768
configuration.save_pretrained(snake_case__ )
shutil.move(os.path.join(snake_case__ , "config.4.0.0.json" ) , os.path.join(snake_case__ , "config.42.0.0.json" ) )
lowercase__ : Optional[Any]= AutoConfig.from_pretrained(snake_case__ )
self.assertEqual(new_configuration.hidden_size , 768 )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
lowercase__ : Optional[Any]= "hf-internal-testing/test-two-configs"
import transformers as new_transformers
lowercase__ : Optional[Any]= "v4.0.0"
lowercase__, lowercase__ : str= new_transformers.models.auto.AutoConfig.from_pretrained(
snake_case__ , return_unused_kwargs=snake_case__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` ia not kept in the kwargs by mistake.
self.assertDictEqual(snake_case__ , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
lowercase__ : Dict= "v3.0.0"
lowercase__ : Tuple= old_transformers.models.auto.AutoConfig.from_pretrained(snake_case__ )
self.assertEqual(old_configuration.hidden_size , 768 )
| 150 | 0 |
def lowerCAmelCase_ (numerator: int = 1 , digit: int = 1_0_0_0 ):
    """simple docstring"""
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator , digit + 1 ):
        has_been_divided = []
        now_divide = numerator
        for _ in range(1 , digit + 1 ):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided ):
                    longest_list_length = len(has_been_divided )
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide )
                now_divide = now_divide * 1_0 % divide_by_number
return the_digit
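# Illustrative check (not from the original file): for digit=10 this returns 7,
# since 1/7 = 0.(142857) has the longest recurring cycle among denominators <= 10.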
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 147 |
from __future__ import annotations
def longest_subsequence( array : list[int] ) -> list[int]: # This function is recursive
    array_length = len(array )
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array )
            if len(temp_array ) > len(longest_subseq ):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array )]
    if len(temp_array ) > len(longest_subseq ):
        return temp_array
    else:
        return longest_subseq
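# Illustrative trace (not from the original file): longest_subsequence([1, 3, 2, 4])
# keeps the pivot 1, the recursion on the tail finds [2, 4], and the call returns
# [1, 2, 4] -- one of the two longest non-decreasing subsequences of that input.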
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 | 0 |
"""simple docstring"""
import math
import tensorflow as tf
from packaging import version
def _gelu (x: Optional[int] ):
    """simple docstring"""
    x = tf.convert_to_tensor(x )
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
    return x * cdf
def _gelu_new (x: str ):
    """simple docstring"""
    x = tf.convert_to_tensor(x )
    pi = tf.cast(math.pi , x.dtype )
    coeff = tf.cast(0.044715 , x.dtype )
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(x , 3 )) ))
    return x * cdf
def mish (x: Optional[int] ):
    """simple docstring"""
    x = tf.convert_to_tensor(x )
    return x * tf.tanh(tf.math.softplus(x ) )
def gelu_fast (x: Tuple ):
    """simple docstring"""
    x = tf.convert_to_tensor(x )
    coeff_a = tf.cast(0.044715 , x.dtype )
    coeff_b = tf.cast(0.7978845608 , x.dtype )
    return 0.5 * x * (1.0 + tf.tanh(x * coeff_b * (1.0 + coeff_a * x * x) ))
def quick_gelu (x: int ):
    """simple docstring"""
    x = tf.convert_to_tensor(x )
    coeff = tf.cast(1.702 , x.dtype )
    return x * tf.math.sigmoid(coeff * x )
def gelu_aa (x: int ):
    """simple docstring"""
    return tf.clip_by_value(_gelu(x ) , -1_0 , 1_0 )
def glu (x: List[Any] , axis: Optional[Any]=-1 ):
    """simple docstring"""
    a , b = tf.split(x , 2 , axis=axis )
    return a * tf.math.sigmoid(b )
if version.parse(tf.version.VERSION) >= version.parse('2.4'):
    def approximate_gelu_wrap (x: Optional[int] ):
        """simple docstring"""
        return tf.keras.activations.gelu(x , approximate=True )
    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new
ACTaFN = {
'gelu': gelu,
'gelu_10': gelu_aa,
'gelu_fast': gelu_fast,
'gelu_new': gelu_new,
'glu': glu,
'mish': mish,
'quick_gelu': quick_gelu,
'relu': tf.keras.activations.relu,
'sigmoid': tf.keras.activations.sigmoid,
'silu': tf.keras.activations.swish,
'swish': tf.keras.activations.swish,
'tanh': tf.keras.activations.tanh,
}
def get_tf_activation (activation_string: List[str] ):
    """simple docstring"""
    if activation_string in ACTaFN:
        return ACTaFN[activation_string]
    else:
        raise KeyError(F'function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}' )
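# Minimal usage sketch (illustrative):
# act = get_tf_activation("gelu_new")
# act(tf.constant([0.0]))  # every gelu-family activation here maps 0.0 to 0.0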
| 366 |
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys (state_dict: int ):
"""simple docstring"""
UpperCAmelCase_: str = OrderedDict()
for key, value in state_dict.items():
if key.startswith("""module.encoder""" ):
UpperCAmelCase_: Tuple = key.replace("""module.encoder""" , """glpn.encoder""" )
if key.startswith("""module.decoder""" ):
UpperCAmelCase_: List[str] = key.replace("""module.decoder""" , """decoder.stages""" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
UpperCAmelCase_: Optional[Any] = key[key.find("""patch_embed""" ) + len("""patch_embed""" )]
UpperCAmelCase_: Optional[Any] = key.replace(F'patch_embed{idx}' , F'patch_embeddings.{int(lowerCAmelCase__ )-1}' )
if "norm" in key:
UpperCAmelCase_: Dict = key.replace("""norm""" , """layer_norm""" )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
UpperCAmelCase_: Any = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )]
UpperCAmelCase_: str = key.replace(F'layer_norm{idx}' , F'layer_norm.{int(lowerCAmelCase__ )-1}' )
if "layer_norm1" in key:
UpperCAmelCase_: Tuple = key.replace("""layer_norm1""" , """layer_norm_1""" )
if "layer_norm2" in key:
UpperCAmelCase_: int = key.replace("""layer_norm2""" , """layer_norm_2""" )
if "block" in key:
# replace for example block1 by block.0
UpperCAmelCase_: Any = key[key.find("""block""" ) + len("""block""" )]
UpperCAmelCase_: Optional[Any] = key.replace(F'block{idx}' , F'block.{int(lowerCAmelCase__ )-1}' )
if "attn.q" in key:
UpperCAmelCase_: Dict = key.replace("""attn.q""" , """attention.self.query""" )
if "attn.proj" in key:
UpperCAmelCase_: Tuple = key.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in key:
UpperCAmelCase_: str = key.replace("""attn""" , """attention.self""" )
if "fc1" in key:
UpperCAmelCase_: Tuple = key.replace("""fc1""" , """dense1""" )
if "fc2" in key:
UpperCAmelCase_: Optional[int] = key.replace("""fc2""" , """dense2""" )
if "linear_pred" in key:
UpperCAmelCase_: Optional[int] = key.replace("""linear_pred""" , """classifier""" )
if "linear_fuse" in key:
UpperCAmelCase_: Tuple = key.replace("""linear_fuse.conv""" , """linear_fuse""" )
UpperCAmelCase_: List[str] = key.replace("""linear_fuse.bn""" , """batch_norm""" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
UpperCAmelCase_: Optional[int] = key[key.find("""linear_c""" ) + len("""linear_c""" )]
UpperCAmelCase_: List[str] = key.replace(F'linear_c{idx}' , F'linear_c.{int(lowerCAmelCase__ )-1}' )
if "bot_conv" in key:
UpperCAmelCase_: Optional[Any] = key.replace("""bot_conv""" , """0.convolution""" )
if "skip_conv1" in key:
UpperCAmelCase_: Any = key.replace("""skip_conv1""" , """1.convolution""" )
if "skip_conv2" in key:
UpperCAmelCase_: Optional[Any] = key.replace("""skip_conv2""" , """2.convolution""" )
if "fusion1" in key:
UpperCAmelCase_: Dict = key.replace("""fusion1""" , """1.fusion""" )
if "fusion2" in key:
UpperCAmelCase_: Any = key.replace("""fusion2""" , """2.fusion""" )
if "fusion3" in key:
UpperCAmelCase_: Union[str, Any] = key.replace("""fusion3""" , """3.fusion""" )
if "fusion" in key and "conv" in key:
UpperCAmelCase_: List[Any] = key.replace("""conv""" , """convolutional_layer""" )
if key.startswith("""module.last_layer_depth""" ):
UpperCAmelCase_: Union[str, Any] = key.replace("""module.last_layer_depth""" , """head.head""" )
UpperCAmelCase_: Union[str, Any] = value
return new_state_dict
def read_in_k_v(state_dict, config):
    """simple docstring"""
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    """simple docstring"""
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]])
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]])
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path',
default=None,
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
parser.add_argument(
'--model_name',
default='glpn-kitti',
type=str,
help='Name of the model in case you\'re pushing to the hub.',
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
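# --- Example invocation (added for illustration; the checkpoint path below is a
# placeholder, not a file shipped with this script):
#
#   python convert_glpn_to_pytorch.py \
#       --checkpoint_path ./glpn_nyu.pth \
#       --pytorch_dump_folder_path ./glpn-nyu \
#       --model_name glpn-nyu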
| 82 | 0 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class UpperCAmelCase__ :
'''simple docstring'''
UpperCamelCase = 42
UpperCamelCase = None
# Automatically constructed
UpperCamelCase = """dict"""
UpperCamelCase = None
UpperCamelCase = field(default="""Translation""" ,init=A__ ,repr=A__ )
def __call__( self : Tuple ):
'''simple docstring'''
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def snake_case__ ( self : Any ):
'''simple docstring'''
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class UpperCAmelCase__ :
'''simple docstring'''
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
# Automatically constructed
UpperCamelCase = """dict"""
UpperCamelCase = None
UpperCamelCase = field(default="""TranslationVariableLanguages""" ,init=A__ ,repr=A__ )
def snake_case__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = sorted(set(self.languages ) ) if self.languages else None
__UpperCAmelCase : Optional[Any] = len(self.languages ) if self.languages else None
def __call__( self : Dict ):
'''simple docstring'''
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
def snake_case__ ( self : List[Any] , a_ : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = set(self.languages )
if self.languages and set(_A ) - lang_set:
raise ValueError(
F'Some languages in example ({", ".join(sorted(set(_A ) - lang_set ) )}) are not in valid set ({", ".join(_A )}).' )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
__UpperCAmelCase : Union[str, Any] = []
for lang, text in translation_dict.items():
if isinstance(_A , _A ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
__UpperCAmelCase : Optional[int] = zip(*sorted(_A ) )
return {"language": languages, "translation": translations}
def snake_case__ ( self : Dict ):
'''simple docstring'''
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
| 226 |
def euclidean_gcd(a: int, b: int):
    """simple docstring"""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int):
    """simple docstring"""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
"""simple docstring"""
print(f'euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}' )
print(f'euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}' )
print(f'euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}' )
print(f'euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}' )
print(f'euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}' )
print(f'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}' )
print(f'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}' )
print(f'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}' )
if __name__ == "__main__":
main()
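# --- Worked example (added for illustration): one trace of the iterative loop above.
# euclidean_gcd(48, 18): (48, 18) -> (18, 12) -> (12, 6) -> (6, 0) -> returns 6.
if __name__ == "__main__":
    assert euclidean_gcd(48, 18) == 6
    assert euclidean_gcd_recursive(48, 18) == 6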
| 18 | 0 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"
        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weihts = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts.expert_{idx}/")] = expert_weihts[idx]
                print(f"{key} -> {key.replace('expert/', f'experts.expert_{idx}/')}")

            s_dict.pop(key)

    return s_dict
GIN_TO_CONFIG_MAPPING = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def convert_gin_to_config(gin_file, num_experts):
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
) | 359 |
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : int = logging.get_logger(__name__)
model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1_408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1_536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1_792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2_048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2_304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2_560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') )
rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') )
rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') )
rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') )
rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') )
rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') )
rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') )
rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') )
rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
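# --- Usage sketch (added for illustration). After running with --save_model, the
# dump folder can be reloaded like any Transformers checkpoint; "hf_model" is the
# script's default output path, shown only as an example:
#
#   from transformers import EfficientNetForImageClassification, EfficientNetImageProcessor
#   model = EfficientNetForImageClassification.from_pretrained("hf_model")
#   processor = EfficientNetImageProcessor.from_pretrained("hf_model")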
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    args = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 130 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_mae'] = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vit_mae'] = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
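# --- Note (added): _LazyModule defers the heavy torch/TF imports above until an
# attribute is first accessed, e.g.:
#
#   from transformers.models.vit_mae import ViTMAEModel  # triggers the torch branch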
| 110 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained('openai/clip-vit-base-patch32')
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / 'preprocessor_config.json'
            config_tmpfile = Path(tmpdirname) / 'config.json'
            json.dump(
                {'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'},
                open(processor_tmpfile, 'w'),
            )
            json.dump({'model_type': 'clip'}, open(config_tmpfile, 'w'))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / 'preprocessor_config.json'
            config_tmpfile = Path(tmpdirname) / 'config.json'
            json.dump(
                {'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'},
                open(processor_tmpfile, 'w'),
            )
            json.dump({'model_type': 'clip'}, open(config_tmpfile, 'w'))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_processor_type
            processor_tmpfile = Path(tmpdirname) / 'preprocessor_config.json'
            config_tmpfile = Path(tmpdirname) / 'config.json'
            json.dump(
                {'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'},
                open(processor_tmpfile, 'w'),
            )
            json.dump({'model_type': 'clip'}, open(config_tmpfile, 'w'))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()
            config_dict.pop('image_processor_type')
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue('_processor_class' not in dict_as_saved)

            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / 'preprocessor_config.json'
            json.dump(
                {'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'},
                open(processor_tmpfile, 'w'),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, 'clip-base is not a local folder and is not a valid model identifier'
        ):
            _ = AutoImageProcessor.from_pretrained('clip-base')

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision='aaaaaa')

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.',
        ):
            _ = AutoImageProcessor.from_pretrained('hf-internal-testing/config-no-model')

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor')
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                'hf-internal-testing/test_dynamic_image_processor', trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            'hf-internal-testing/test_dynamic_image_processor', trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, 'NewImageProcessor')

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, 'NewImageProcessor')

    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register('custom', CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / 'preprocessor_config.json'
                config_tmpfile = Path(tmpdirname) / 'config.json'
                json.dump(
                    {'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'},
                    open(processor_tmpfile, 'w'),
                )
                json.dump({'model_type': 'clip'}, open(config_tmpfile, 'w'))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(tmp_dir)
                new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_image_processor, CustomImageProcessor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register('custom', CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor')
            self.assertEqual(image_processor.__class__.__name__, 'NewImageProcessor')
            self.assertTrue(image_processor.is_local)
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                'hf-internal-testing/test_dynamic_image_processor', trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, 'NewImageProcessor')
            self.assertTrue(image_processor.is_local)
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                'hf-internal-testing/test_dynamic_image_processor', trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, 'NewImageProcessor')
            self.assertTrue(not hasattr(image_processor, 'is_local'))

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 225 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class EncoderDecoderConfig(PretrainedConfig):
    model_type = 'encoder-decoder'
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop('encoder')
        encoder_model_type = encoder_config.pop('model_type')
        decoder_config = kwargs.pop('decoder')
        decoder_model_type = decoder_config.pop('model_type')

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs):
        logger.info('Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config')
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['encoder'] = self.encoder.to_dict()
        output['decoder'] = self.decoder.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
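# --- Usage sketch (added for illustration; "bert-base-uncased" is a common
# public checkpoint used here only as an example):
#
#   from transformers import AutoConfig, EncoderDecoderConfig
#   encoder = AutoConfig.from_pretrained("bert-base-uncased")
#   decoder = AutoConfig.from_pretrained("bert-base-uncased")
#   config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder, decoder)
#   assert config.decoder.is_decoder and config.decoder.add_cross_attention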
| 107 |
'''simple docstring'''
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class GradientAccumulatorTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_gradient_accumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
accumulator.reset()
self.assertEqual(accumulator.step ,0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() ,[0.0, 0.0] ,tol=1E-2 )
    def test_gradient_accumulator_distribution(self):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices('CPU')
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()])
        devices = tf.config.list_logical_devices(device_type='CPU')
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)
        def accumulate_on_replica(gradient):
            accumulator([gradient])
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients ,[variable] ) ) )
@tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))
@tf.function
def apply_grad():
with strategy.scope():
                strategy.run(apply_on_replica)
        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)
accumulate([1.0, 2.0] ,[-1.0, 1.0] )
accumulate([3.0, -1.0] ,[-1.0, -1.0] )
accumulate([-2.0, 2.0] ,[3.0, -2.0] )
self.assertEqual(accumulator.step ,3 )
_check_local_values([2.0, 3.0] ,[1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() ,[4.0, 3.0] ,tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step ,0 )
_check_local_values([0.0, 0.0] ,[0.0, 0.0] )
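# --- Usage sketch (added): the GradientAccumulator contract exercised above —
# each call adds gradients element-wise, `.gradients` reads the running sum,
# and `.reset()` zeroes it. Assumes TensorFlow is installed.
#
#   acc = GradientAccumulator()
#   acc([tf.constant([1.0, 2.0])])
#   acc([tf.constant([3.0, 4.0])])
#   acc.gradients[0]  # -> [4.0, 6.0], with acc.step == 2
#   acc.reset()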
| 107 | 1 |
import argparse
import os
import re
PATH_TO_AUTO_MODULE = "src/transformers/models/auto"


# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r"\s*\(\s*\"(\S[^\"]+)\"")
def sort_auto_mapping(fname, overwrite: bool = False):
    """simple docstring"""
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True
def sort_all_auto_mappings(overwrite: bool = False):
    """simple docstring"""
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            F"""The following files have auto mappings that need sorting: {", ".join(failures)}. Run `make style` to fix"""
            ''' this.''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
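# --- Illustration (added): what the two patterns above match, on synthetic input.
#
#   >>> bool(_re_intro_mapping.search("MODEL_MAPPING_NAMES = OrderedDict("))
#   True
#   >>> _re_identifier.search('        ("albert", "AlbertModel"),').groups()[0]
#   'albert'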
| 51 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            """simple docstring"""
            pass
def hashimage(image: Image) -> str:
    """simple docstring"""
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        """simple docstring"""
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test(self, depth_estimator, examples):
        """simple docstring"""
        outputs = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
        self.assertEqual({'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)}, outputs)
import datasets
        dataset = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''', '''image''', split='''test''')
        outputs = depth_estimator(
[
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png'''),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
])
self.assertEqual(
[
{'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
{'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
{'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
{'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
{'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
            ], outputs)
@require_tf
@unittest.skip('''Depth estimation is not implemented in TF''')
    def test_small_model_tf(self):
"""simple docstring"""
pass
@slow
@require_torch
    def test_large_model_pt(self):
        """simple docstring"""
        model_id = '''Intel/dpt-large'''
        depth_estimator = pipeline('''depth-estimation''', model=model_id)
        outputs = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''')
        outputs['''depth'''] = hashimage(outputs['''depth'''])

        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item()), 2.662)
@require_torch
    def test_small_model_pt(self):
"""simple docstring"""
self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''')
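# --- Usage sketch (added; mirrors the slow test above). This downloads the
# Intel/dpt-large checkpoint, so it is an illustration rather than a unit test:
#
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   out = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   out["depth"].save("depth.png")  # PIL image; out["predicted_depth"] is the raw tensor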
| 51 | 1 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    def __init__(self, params, data):
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])
def __len__( self ):
return len(self.lengths )
    def check(self):
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
    def remove_long_sequences(self):
        max_len = self.params.max_model_input_size
        idx = self.lengths > max_len
        logger.info(f"""Splitting {sum(idx)} too long sequences.""")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
        else:
            cls_id, sep_id = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)
    def remove_empty_sequences(self):
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""")
    def remove_unknown_sequences(self):
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["""unk_token"""]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""")
    def print_statistics(self):
        if not self.params.is_master:
            return
        logger.info(f"""{len(self)} sequences""")
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences(self, batch):
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["""pad_token"""]
        else:
            pad_idx = self.params.special_tok_ids["""unk_token"""]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
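# --- Usage sketch (added for illustration). `params` stands in for the training
# script's argparse namespace and `token_id_arrays` for tokenized sequences:
#
#   from torch.utils.data import DataLoader
#   dataset = LmSeqsDataset(params=params, data=token_id_arrays)
#   loader = DataLoader(dataset, batch_size=32, collate_fn=dataset.batch_sequences)
#   for token_ids, lengths in loader:
#       ...  # (bs, max_seq_len) padded ids and (bs,) true lengths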
| 355 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = """speech_to_text_2"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """decoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
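# --- Usage sketch (added): construct with defaults, overriding one field; the
# values below are the defaults declared in __init__ above:
#
#   config = Speech2Text2Config(decoder_layers=4)
#   assert config.d_model == 256 and config.decoder_layers == 4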
| 87 | 0 |
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")

    return quad(integrand, 0, inf, args=(num))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
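# --- Worked check (added): for positive integers n, gamma(n) == (n - 1)!,
# e.g. gamma(5) integrates x**4 * exp(-x) over [0, inf) and returns ~24:
#
#   >>> round(gamma(5), 4)
#   24.0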
if __name__ == "__main__":
from doctest import testmod
    testmod()
| 269 |
'''simple docstring'''
from math import factorial
def combinations(n: int, k: int) -> int:
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError('''Please enter positive integers for n and k where n >= k''')
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
"The number of five-card hands possible from a standard",
f'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
"If a class of 40 students must be arranged into groups of",
f'''4 for group projects, there are {combinations(40, 4)} ways''',
"to arrange them.\n",
)
print(
"If 10 teams are competing in a Formula One race, there",
f'''are {combinations(10, 3)} ways that first, second and''',
"third place can be awarded.",
)
| 125 | 0 |
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def A ( _lowerCamelCase="" ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = tempfile.mkdtemp()
return os.path.join(UpperCAmelCase__ , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))

        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16000)

        agent_type = AgentAudio(path)
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)
@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))

        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)

        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)

        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
class AgentTextTests(unittest.TestCase):
    def test_from_string(self):
        string = "Hey!"
        agent_type = AgentText(string)

        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
| 371 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
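# Minimal sketch (illustrative, not part of the conversion script) of the
# fused-QKV split performed above: timm stores query/key/value as a single
# (3 * hidden, hidden) matrix, which is cut into three (hidden, hidden) blocks.
def _demo_qkv_split(hidden: int = 4) -> None:
    qkv = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
    q = qkv[:hidden, :]
    k = qkv[hidden : 2 * hidden, :]
    v = qkv[-hidden:, :]
    assert q.shape == k.shape == v.shape == (hidden, hidden)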
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
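# Tiny illustration (added, not in the original script) of how rename_key
# rewrites a checkpoint entry: the value is moved and the old key disappears.
_example = {"cls_token": 1}
rename_key(_example, "cls_token", "vit.embeddings.cls_token")
assert _example == {"vit.embeddings.cls_token": 1}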
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak a timm ViT/DeiT checkpoint into our ViT structure.
    """
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1_000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1_536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass  # deit-base uses the default config values
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2_304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass  # vit-base uses the default config values
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1_024
            config.intermediate_size = 4_096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1_280
            config.intermediate_size = 5_120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_patch16_224",
type=str,
help="Name of the ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 300 | 0 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """Rename fairseq keys, split the fused QKV projections, and separate out the enc-dec projection weights."""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1_024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1_536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2_048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
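# Quick sanity check (illustrative, added here): the "small" checkpoint maps to
# a 1024-wide decoder with a 4x FFN expansion, mirroring the table encoded above.
_small = decoder_config_from_checkpoint("small")
assert _small.hidden_size == 1_024 and _small.ffn_dim == 4_096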
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2_048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")

    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2_048
    model.generation_config.pad_token_id = 2_048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 330 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_perceiver""": ["""PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PerceiverConfig""", """PerceiverOnnxConfig"""],
"""tokenization_perceiver""": ["""PerceiverTokenizer"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
"""PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PerceiverForImageClassificationConvProcessing""",
"""PerceiverForImageClassificationFourier""",
"""PerceiverForImageClassificationLearned""",
"""PerceiverForMaskedLM""",
"""PerceiverForMultimodalAutoencoding""",
"""PerceiverForOpticalFlow""",
"""PerceiverForSequenceClassification""",
"""PerceiverLayer""",
"""PerceiverModel""",
"""PerceiverPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 179 | 0 |
"""simple docstring"""
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 362 |
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Ideal-gas pressure: P = nRT / V.

    >>> pressure_of_gas_system(2, 100, 5)
    332.57848
    """
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Ideal-gas volume: V = nRT / P.

    >>> volume_of_gas_system(2, 100, 5)
    332.57848
    """
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
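# Round-trip check (illustrative, added): for fixed n and T, plugging the
# computed pressure back in as "pressure" must return the original volume,
# since both formulas rearrange PV = nRT.
_p = pressure_of_gas_system(2.0, 300.0, 1.0)
assert abs(volume_of_gas_system(2.0, 300.0, _p) - 1.0) < 1e-9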
if __name__ == "__main__":
from doctest import testmod
testmod()
| 186 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.upsample.0""": """encoder.upsample.projection""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
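# Minimal sketch (illustrative, not part of the conversion script) of the
# getattr walk in set_recursively: a dotted key is resolved one attribute at a
# time until the pointer lands on the target parameter.
import torch.nn as nn

_demo_model = nn.Sequential(nn.Linear(2, 2))
_pointer = _demo_model
for _attribute in "0.weight".split("."):
    _pointer = getattr(_pointer, _attribute)
assert _pointer.shape == (2, 2)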
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
@torch.no_grad()
def convert_sew_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
hf_model.save_pretrained(__snake_case )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 75 |
def cocktail_shaker_sort(unsorted: list) -> list:
    """Sort a list by sweeping alternately backward and forward.

    >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
    [1, 2, 2, 4, 5]
    >>> cocktail_shaker_sort([-4, 5, 0, 1, 2, 11])
    [-4, 0, 1, 2, 5, 11]
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted
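# Illustrative check (added): the early-exit `break` makes already-sorted input
# a single O(n) pass, while reversed input is the worst case.
assert cocktail_shaker_sort(list(range(10))) == list(range(10))
assert cocktail_shaker_sort(list(range(10, 0, -1))) == list(range(1, 11))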
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(F"""{cocktail_shaker_sort(unsorted) = }""")
| 204 | 0 |
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation count of a paper scraped from Google Scholar."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2_018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 339 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100_000, hidden_size=4_096, intermediate_size=11_008, num_hidden_layers=32,
        num_attention_heads=32, hidden_act="silu", max_position_embeddings=2_048, initializer_range=0.02,
        rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2,
        tie_word_embeddings=False, use_memory_efficient_attention=True, hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1, use_stable_embedding=True, shared_input_output_embedding=True,
        rope_scaling=None, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # accept the historical misspelling of this kwarg for backward compatibility
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
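# Usage sketch (illustrative, not part of the original module): the validation
# hook rejects malformed rope_scaling dicts at construction time.
_demo_config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
assert _demo_config.rope_scaling["factor"] == 2.0
try:
    OpenLlamaConfig(rope_scaling={"type": "unknown", "factor": 2.0})
except ValueError:
    pass  # expected: "unknown" is not a valid scaling type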
| 339 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 19 |
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """Return the sum of all numbers that equal the sum of the fifth powers of their digits."""
    return sum(
        number for number in range(1_000, 1_000_000) if number == digits_fifth_powers_sum(number)
    )
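# Spot check (illustrative, added): 4150 = 4**5 + 1**5 + 5**5 + 0**5 is one of
# the numbers the solution sums.
assert digits_fifth_powers_sum(4_150) == 4_150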
if __name__ == "__main__":
print(solution())
| 19 | 1 |
"""simple docstring"""
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.plbart.modeling_plbart import shift_tokens_right

EN_CODE = 50003
PYTHON_CODE = 50002
@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_base_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)]
        self.assertListEqual(language_tokens, ["__java__", "__python__", "__en_XX__", "<mask>"])

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )
    def test_full_multi_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="multi", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)]
        self.assertListEqual(
            language_tokens,
            ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"],
        )

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest(unittest.TestCase):
    checkpoint_name = "uclanlp/plbart-python-en_XX"
    src_text = [
"""def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])""",
"""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""",
]
    tgt_text = [
"""Returns the maximum value of a b c.""",
"""Sums the values of a b c.""",
]
    expected_src_tokens = [
1_34,
54_52,
3_34_60,
3_34_41,
3_34_63,
3_34_65,
3_34_63,
3_34_49,
9_88,
20,
3_34_56,
19,
3_34_56,
7_71,
39,
42_58,
8_89,
33_18,
3_34_41,
3_34_63,
3_34_65,
3_34_63,
3_34_49,
24_71,
2,
PYTHON_CODE,
]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name, language_codes="base", src_lang="python", tgt_lang="en_XX"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"], 50001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"], 50002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"], 50003)
    def test_python_en_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_python_en_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(PYTHON_CODE, self.tokenizer.all_special_ids)
        generated_ids = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_english = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_english)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_python_en_tokenizer_truncation(self):
        src_text = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
        self.assertIsInstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], PYTHON_CODE)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"]), [50004, 50001])
    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = PLBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist(), [2, PYTHON_CODE])
        self.assertEqual(batch.decoder_input_ids[1][0], EN_CODE)
        self.assertEqual(batch.decoder_input_ids[1][-1], 2)
        self.assertEqual(batch.labels[1][-2:].tolist(), [2, EN_CODE])
@require_torch
    def test_python_en_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 26), batch.input_ids.shape)
        self.assertEqual((2, 26), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, PYTHON_CODE])
    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
@require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="java"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[150, 242, 2, 50003]],
                "attention_mask": [[1, 1, 1, 1]],
                # java
                "forced_bos_token_id": 50001,
            },
        )
| 181 |
"""simple docstring"""
import unittest
from transformers import DebertaV2Config, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import (
        DebertaV2ForMaskedLM,
        DebertaV2ForMultipleChoice,
        DebertaV2ForQuestionAnswering,
        DebertaV2ForSequenceClassification,
        DebertaV2ForTokenClassification,
        DebertaV2Model,
    )
    from transformers.models.deberta_v2.modeling_deberta_v2 import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaV2ModelTester(object):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, relative_attention=False,
        position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )
    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2Model(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])
    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaV2ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)
    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaV2ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class A_ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
        (
            DebertaVaModel,
            DebertaVaForMaskedLM,
            DebertaVaForSequenceClassification,
            DebertaVaForTokenClassification,
            DebertaVaForQuestionAnswering,
            DebertaVaForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            """feature-extraction""": DebertaVaModel,
            """fill-mask""": DebertaVaForMaskedLM,
            """question-answering""": DebertaVaForQuestionAnswering,
            """text-classification""": DebertaVaForSequenceClassification,
            """token-classification""": DebertaVaForTokenClassification,
            """zero-shot""": DebertaVaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp( self ):
        self.model_tester = DebertaVaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DebertaVaConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_deberta_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs )
    def test_deberta_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs )
    def test_deberta_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs )
    def test_deberta_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs )
    def test_deberta_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs )
    def test_deberta_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
@require_sentencepiece
@require_tokenizers
class A_ ( unittest.TestCase ):
"""simple docstring"""
    @unittest.skip(reason='Model not available yet' )
    def test_inference_masked_lm( self ):
        pass
    @slow
    def test_inference_no_head( self ):
        model = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' )
        input_ids = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) , f"""{output[:, 1:4, 1:4]}""" )
| 181 | 1 |
'''simple docstring'''
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __SCREAMING_SNAKE_CASE (ModelMixin , ConfigMixin ):
    """simple docstring"""
    @register_to_config
    def __init__( self , *,
        clip_extra_context_tokens : int = 4 , clip_embeddings_dim : int = 7_68 , time_embed_dim : int , cross_attention_dim : int , ):
        super().__init__()
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim ) )
        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim , time_embed_dim )
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim , time_embed_dim )
        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim , self.clip_extra_context_tokens * cross_attention_dim )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim , cross_attention_dim )
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim )
    def forward( self , *, image_embeddings , prompt_embeds , text_encoder_hidden_states , do_classifier_free_guidance ):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size , -1 )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]
        batch_size = prompt_embeds.shape[0]
        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds )
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings )
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds
        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings )
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size , -1 , self.clip_extra_context_tokens )
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0 , 2 , 1 )
        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states )
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states )
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
        return text_encoder_hidden_states, additive_clip_time_embeddings
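# A minimal usage sketch (the shapes and dimensions below are illustrative
# assumptions, not fixed by this module):
#
#     proj = __SCREAMING_SNAKE_CASE(
#         clip_extra_context_tokens=4, clip_embeddings_dim=768,
#         time_embed_dim=1536, cross_attention_dim=2048,
#     )
#     hidden, time_emb = proj(
#         image_embeddings=torch.randn(2, 768),
#         prompt_embeds=torch.randn(2, 768),
#         text_encoder_hidden_states=torch.randn(2, 77, 2048),
#         do_classifier_free_guidance=False,
#     )
#     # hidden has shape (2, 4 + 77, 2048): the 4 extra context tokens are prepended
#     # time_emb has shape (2, 1536): it is added to the UNet timestep embedding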
| 63 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class lowerCamelCase_ ( unittest.TestCase ):
    def setUp( self ):
        '''simple docstring'''
        vocab = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.add_kwargs_tokens_map = {
            '''unk_token''': '''<unk>''',
            '''bos_token''': '''<s>''',
            '''eos_token''': '''</s>''',
        }
        feature_extractor_map = {
            '''feature_size''': 1,
            '''padding_value''': 0.0,
            '''sampling_rate''': 16_000,
            '''return_attention_mask''': False,
            '''do_normalize''': True,
        }
        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.feature_extraction_file = os.path.join(self.tmpdirname , FEATURE_EXTRACTOR_NAME )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(feature_extractor_map ) + '''\n''' )
        # load decoder from hub
        self.decoder_name = '''hf-internal-testing/ngram-beam-search-decoder'''
    def get_tokenizer( self , **kwargs_init ):
        '''simple docstring'''
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init )
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_feature_extractor( self , **kwargs ):
        '''simple docstring'''
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **kwargs )
    def get_decoder( self , **kwargs ):
        '''simple docstring'''
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **kwargs )
    def tearDown( self ):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.get_tokenizer()
UpperCAmelCase__ : Any = self.get_feature_extractor()
UpperCAmelCase__ : Dict = self.get_decoder()
UpperCAmelCase__ : Dict = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , _A )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _A )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , _A )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(_A , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=_A , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.get_feature_extractor()
UpperCAmelCase__ : List[Any] = self.get_tokenizer()
UpperCAmelCase__ : int = self.get_decoder()
UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
UpperCAmelCase__ : str = floats_list((3, 1_000) )
UpperCAmelCase__ : str = feature_extractor(_A , return_tensors='''np''' )
UpperCAmelCase__ : Optional[Any] = processor(_A , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.get_feature_extractor()
UpperCAmelCase__ : Any = self.get_tokenizer()
UpperCAmelCase__ : Any = self.get_decoder()
UpperCAmelCase__ : str = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
UpperCAmelCase__ : Union[str, Any] = '''This is a test string'''
UpperCAmelCase__ : Union[str, Any] = processor(text=_A )
UpperCAmelCase__ : Dict = tokenizer(_A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _get_dummy_logits( self , shape=(2, 10, 16) , seed=77 ):
        '''simple docstring'''
        np.random.seed(seed )
        return np.random.rand(*shape )
    def test_decoder( self ):
        '''simple docstring'''
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer , feature_extractor=feature_extractor , decoder=decoder )
        logits = self._get_dummy_logits(shape=(10, 16) , seed=13 )
        decoded_processor = processor.decode(logits )
        decoded_decoder = decoder.decode_beams(logits )[0]
        self.assertEqual(decoded_decoder[0] , decoded_processor.text )
        self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
        self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
        self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
    def test_decoder_batch( self , pool_context ):
        '''simple docstring'''
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer , feature_extractor=feature_extractor , decoder=decoder )
        logits = self._get_dummy_logits()
        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits )
        else:
            with get_context(pool_context ).Pool() as pool:
                decoded_processor = processor.batch_decode(logits , pool )
        logits_list = list(logits )
        with get_context('''fork''' ).Pool() as p:
            decoded_beams = decoder.decode_beams_batch(logits_list , p )
        texts_decoder , logit_scores_decoder , lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0] )
            logit_scores_decoder.append(beams[0][-2] )
            lm_scores_decoder.append(beams[0][-1] )
        self.assertListEqual(texts_decoder , decoded_processor.text )
        self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
        self.assertListEqual(logit_scores_decoder , decoded_processor.logit_score )
        self.assertListEqual(lm_scores_decoder , decoded_processor.lm_score )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Any = self.get_feature_extractor()
UpperCAmelCase__ : str = self.get_tokenizer()
UpperCAmelCase__ : Optional[int] = self.get_decoder()
UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
UpperCAmelCase__ : List[Any] = self._get_dummy_logits()
UpperCAmelCase__ : Optional[Any] = 15
UpperCAmelCase__ : Union[str, Any] = -2_0.0
UpperCAmelCase__ : List[str] = -4.0
UpperCAmelCase__ : str = processor.batch_decode(
_A , beam_width=_A , beam_prune_logp=_A , token_min_logp=_A , )
UpperCAmelCase__ : Union[str, Any] = decoded_processor_out.text
UpperCAmelCase__ : Tuple = list(_A )
with get_context('''fork''' ).Pool() as pool:
UpperCAmelCase__ : Tuple = decoder.decode_beams_batch(
_A , _A , beam_width=_A , beam_prune_logp=_A , token_min_logp=_A , )
UpperCAmelCase__ : Optional[Any] = [d[0][0] for d in decoded_decoder_out]
UpperCAmelCase__ : Any = [d[0][2] for d in decoded_decoder_out]
UpperCAmelCase__ : Tuple = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_A , _A )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , _A )
self.assertTrue(np.array_equal(_A , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , _A , atol=1e-3 ) )
self.assertTrue(np.array_equal(_A , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , _A , atol=1e-3 ) )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = self.get_feature_extractor()
UpperCAmelCase__ : str = self.get_tokenizer()
UpperCAmelCase__ : List[str] = self.get_decoder()
UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
UpperCAmelCase__ : Optional[Any] = self._get_dummy_logits()
UpperCAmelCase__ : List[Any] = 2.0
UpperCAmelCase__ : Union[str, Any] = 5.0
UpperCAmelCase__ : Any = -2_0.0
UpperCAmelCase__ : List[Any] = True
UpperCAmelCase__ : Any = processor.batch_decode(
_A , alpha=_A , beta=_A , unk_score_offset=_A , lm_score_boundary=_A , )
UpperCAmelCase__ : List[Any] = decoded_processor_out.text
UpperCAmelCase__ : List[Any] = list(_A )
decoder.reset_params(
alpha=_A , beta=_A , unk_score_offset=_A , lm_score_boundary=_A , )
with get_context('''fork''' ).Pool() as pool:
UpperCAmelCase__ : Any = decoder.decode_beams_batch(
_A , _A , )
UpperCAmelCase__ : Union[str, Any] = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_A , _A )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , _A )
UpperCAmelCase__ : Any = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -2_0.0 )
self.assertEqual(lm_model.score_boundary , _A )
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase__ : List[Any] = processor.decoder.model_container[processor.decoder._model_key]
UpperCAmelCase__ : Optional[Any] = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
UpperCAmelCase__ : Dict = os.listdir(_A )
UpperCAmelCase__ : str = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_A , _A )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = snapshot_download('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase__ : Tuple = WavaVecaProcessorWithLM.from_pretrained(_A )
UpperCAmelCase__ : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
UpperCAmelCase__ : Union[str, Any] = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
UpperCAmelCase__ : Optional[Any] = os.listdir(_A )
UpperCAmelCase__ : int = os.listdir(_A )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_A , _A )
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase__ : Any = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase__ : Tuple = floats_list((3, 1_000) )
UpperCAmelCase__ : Tuple = processor_wavaveca(_A , return_tensors='''np''' )
UpperCAmelCase__ : Union[str, Any] = processor_auto(_A , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
UpperCAmelCase__ : str = self._get_dummy_logits()
UpperCAmelCase__ : List[Any] = processor_wavaveca.batch_decode(_A )
UpperCAmelCase__ : List[Any] = processor_auto.batch_decode(_A )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.get_feature_extractor()
UpperCAmelCase__ : Any = self.get_tokenizer()
UpperCAmelCase__ : List[str] = self.get_decoder()
UpperCAmelCase__ : Dict = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
    @staticmethod
    def get_from_offsets( offsets , key ):
        '''simple docstring'''
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase__ : Tuple = self._get_dummy_logits()[0]
UpperCAmelCase__ : int = processor.decode(_A , output_word_offsets=_A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_A , _A ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase__ : List[str] = self._get_dummy_logits()
UpperCAmelCase__ : Dict = processor.batch_decode(_A , output_word_offsets=_A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_A , _A ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(_A , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowercase_ ( self : str ):
'''simple docstring'''
import torch
UpperCAmelCase__ : Dict = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=_A )
UpperCAmelCase__ : Optional[Any] = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=16_000 ) )
UpperCAmelCase__ : Any = iter(_A )
UpperCAmelCase__ : Dict = next(_A )
UpperCAmelCase__ : Optional[int] = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
UpperCAmelCase__ : List[Any] = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
UpperCAmelCase__ : Union[str, Any] = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
UpperCAmelCase__ : Dict = model(_A ).logits.cpu().numpy()
UpperCAmelCase__ : List[str] = processor.decode(logits[0] , output_word_offsets=_A )
UpperCAmelCase__ : Tuple = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
UpperCAmelCase__ : List[str] = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
UpperCAmelCase__ : Tuple = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(_A , '''word''' ) ) , _A )
self.assertEqual(''' '''.join(self.get_from_offsets(_A , '''word''' ) ) , output.text )
# output times
UpperCAmelCase__ : List[Any] = torch.tensor(self.get_from_offsets(_A , '''start_time''' ) )
UpperCAmelCase__ : Dict = torch.tensor(self.get_from_offsets(_A , '''end_time''' ) )
# fmt: off
UpperCAmelCase__ : Any = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
UpperCAmelCase__ : Any = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(_A , _A , atol=0.0_1 ) )
self.assertTrue(torch.allclose(_A , _A , atol=0.0_1 ) )
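# A minimal sketch of the multiprocessing decode pattern exercised above
# (illustrative only). The processor must exist before the pool is created,
# so the worker processes inherit the language-model state:
#
#     processor = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm')
#     logits = ...  # (batch, time, vocab) acoustic model outputs
#     with get_context('fork').Pool() as pool:
#         transcripts = processor.batch_decode(logits, pool).text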
| 181 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class UpperCAmelCase__ ( unittest.TestCase):
def __lowerCamelCase ( self ) -> Union[str, Any]:
__UpperCamelCase = tempfile.mkdtemp()
# fmt: off
__UpperCamelCase = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
__UpperCamelCase = dict(zip(lowercase , range(len(lowercase ) ) ) )
__UpperCamelCase = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
__UpperCamelCase = {"""unk_token""": """<unk>"""}
__UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowercase ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowercase ) )
__UpperCamelCase = {
"""do_resize""": True,
"""size""": 2_0,
"""do_center_crop""": True,
"""crop_size""": 1_8,
"""do_normalize""": True,
"""image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"""image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
__UpperCamelCase = os.path.join(self.tmpdirname , lowercase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(lowercase , lowercase )
def __lowerCamelCase ( self , **lowercase ) -> Optional[int]:
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowercase )
def __lowerCamelCase ( self , **lowercase ) -> List[Any]:
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowercase )
def __lowerCamelCase ( self , **lowercase ) -> List[Any]:
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **lowercase )
def __lowerCamelCase ( self ) -> int:
shutil.rmtree(self.tmpdirname )
def __lowerCamelCase ( self ) -> Optional[int]:
__UpperCamelCase = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
__UpperCamelCase = [Image.fromarray(np.moveaxis(lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowerCamelCase ( self ) -> Union[str, Any]:
__UpperCamelCase = self.get_tokenizer()
__UpperCamelCase = self.get_rust_tokenizer()
__UpperCamelCase = self.get_image_processor()
__UpperCamelCase = CLIPProcessor(tokenizer=lowercase , image_processor=lowercase )
processor_slow.save_pretrained(self.tmpdirname )
__UpperCamelCase = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase )
__UpperCamelCase = CLIPProcessor(tokenizer=lowercase , image_processor=lowercase )
processor_fast.save_pretrained(self.tmpdirname )
__UpperCamelCase = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowercase )
self.assertIsInstance(processor_fast.tokenizer , lowercase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowercase )
self.assertIsInstance(processor_fast.image_processor , lowercase )
def __lowerCamelCase ( self ) -> List[str]:
__UpperCamelCase = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__UpperCamelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
__UpperCamelCase = self.get_image_processor(do_normalize=lowercase , padding_value=1.0 )
__UpperCamelCase = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase )
def __lowerCamelCase ( self ) -> int:
__UpperCamelCase = self.get_image_processor()
__UpperCamelCase = self.get_tokenizer()
__UpperCamelCase = CLIPProcessor(tokenizer=lowercase , image_processor=lowercase )
__UpperCamelCase = self.prepare_image_inputs()
__UpperCamelCase = image_processor(lowercase , return_tensors="""np""" )
__UpperCamelCase = processor(images=lowercase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = self.get_image_processor()
__UpperCamelCase = self.get_tokenizer()
__UpperCamelCase = CLIPProcessor(tokenizer=lowercase , image_processor=lowercase )
__UpperCamelCase = """lower newer"""
__UpperCamelCase = processor(text=lowercase )
__UpperCamelCase = tokenizer(lowercase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowerCamelCase ( self ) -> Dict:
__UpperCamelCase = self.get_image_processor()
__UpperCamelCase = self.get_tokenizer()
__UpperCamelCase = CLIPProcessor(tokenizer=lowercase , image_processor=lowercase )
__UpperCamelCase = """lower newer"""
__UpperCamelCase = self.prepare_image_inputs()
__UpperCamelCase = processor(text=lowercase , images=lowercase )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(lowercase ):
processor()
def __lowerCamelCase ( self ) -> Tuple:
__UpperCamelCase = self.get_image_processor()
__UpperCamelCase = self.get_tokenizer()
__UpperCamelCase = CLIPProcessor(tokenizer=lowercase , image_processor=lowercase )
__UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__UpperCamelCase = processor.batch_decode(lowercase )
__UpperCamelCase = tokenizer.batch_decode(lowercase )
self.assertListEqual(lowercase , lowercase )
def __lowerCamelCase ( self ) -> int:
__UpperCamelCase = self.get_image_processor()
__UpperCamelCase = self.get_tokenizer()
__UpperCamelCase = CLIPProcessor(tokenizer=lowercase , image_processor=lowercase )
__UpperCamelCase = """lower newer"""
__UpperCamelCase = self.prepare_image_inputs()
__UpperCamelCase = processor(text=lowercase , images=lowercase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 243 |
'''simple docstring'''
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory( *objects ):
    '''simple docstring'''
    # `objects` arrives as a tuple; convert it to a list so each slot can be cleared
    objects = list(objects )
    for i in range(len(objects ) ):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
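# Minimal usage sketch (names are illustrative): the caller's own references
# are not touched, so assign the returned list back, e.g.
#
#     model, optimizer = release_memory(model, optimizer)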
def should_reduce_batch_size( exception ):
    '''simple docstring'''
    _statements = [
        """CUDA out of memory.""", # CUDA OOM
        """cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.""", # CUDNN SNAFU
        """DefaultCPUAllocator: can't allocate memory""", # CPU OOM
    ]
    if isinstance(exception , RuntimeError ) and len(exception.args ) == 1:
        return any(err in exception.args[0] for err in _statements )
    return False
def find_executable_batch_size( function = None , starting_batch_size = 128 ):
    '''simple docstring'''
    if function is None:
        return functools.partial(find_executable_batch_size , starting_batch_size=starting_batch_size )
    batch_size = starting_batch_size
    def decorator(*args , **kwargs ):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function ).parameters.keys() )
        # Guard against user error
        if len(params ) < (len(args ) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:] , args[1:] )] )
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`" )
        while True:
            if batch_size == 0:
                raise RuntimeError("""No executable batch size found, reached zero.""" )
            try:
                return function(batch_size , *args , **kwargs )
            except Exception as e:
                if should_reduce_batch_size(e ):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise
    return decorator
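# A minimal usage sketch for the decorator above. The training function and
# its arguments are illustrative assumptions, not part of this module:
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def train(batch_size, model):
#         ...  # an OOM raised in here halves `batch_size` (128 -> 64 -> 32 ...) and retries
#
#     train(model)  # the decorator injects `batch_size` as the first argument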
| 243 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_bigbird_pegasus': [
        'BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'BigBirdPegasusConfig',
        'BigBirdPegasusOnnxConfig',
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_bigbird_pegasus'] = [
        'BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BigBirdPegasusForCausalLM',
        'BigBirdPegasusForConditionalGeneration',
        'BigBirdPegasusForQuestionAnswering',
        'BigBirdPegasusForSequenceClassification',
        'BigBirdPegasusModel',
        'BigBirdPegasusPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
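# With this layout, `from transformers.models.bigbird_pegasus import
# BigBirdPegasusModel` only materializes `modeling_bigbird_pegasus` on first
# attribute access; importing the package itself stays cheap.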
| 29 | """simple docstring"""
def lowerCAmelCase__ ( numbers : list[int] ) -> int:
    """simple docstring"""
    if not numbers:
        return 0
    if not isinstance(numbers , (list, tuple) ) or not all(
        isinstance(number , int ) for number in numbers ):
        raise ValueError('numbers must be an iterable of integers' )
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1 , len(numbers ) ):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            min_till_now , max_till_now = max_till_now , min_till_now
        max_till_now = max(number , max_till_now * number )
        min_till_now = min(number , min_till_now * number )
        # update the maximum product found till now
        max_prod = max(max_prod , max_till_now )
    return max_prod
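if __name__ == "__main__":
    # A small worked example (assumed input): within [2, 3, -2, 4] the
    # subarray [2, 3] has the largest product, so we expect 6.
    print(lowerCAmelCase__([2, 3, -2, 4]))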
| 150 | 0 |
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
X = np.array(data['data'])
y = np.array(data['target'])
classes = data['target_names']
X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance( a , b ):
    '''simple docstring'''
    return np.linalg.norm(np.array(a ) - np.array(b ) )
def classifier( train_data , train_target , classes , point , k=5 ):
    '''simple docstring'''
    data = zip(train_data , train_target )
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0] , point )
        distances.append((distance, data_point[1]) )
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances )[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes ).most_common(1 )[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 354 |
'''simple docstring'''
from __future__ import annotations
def lowerCAmelCase_ ( n ):
    '''simple docstring'''
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i )
    if n > 1:
        factors.append(n )
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
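    # A small worked example (assumed input): 360 = 2^3 * 3^2 * 5,
    # so we expect [2, 2, 2, 3, 3, 5].
    print(lowerCAmelCase_(360))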
| 311 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
'''simple docstring'''
lowercase_ = StableUnCLIPImgaImgPipeline
lowercase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
lowercase_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowercase_ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowercase_ = frozenset([] )
def SCREAMING_SNAKE_CASE_ (self : Any) ->Tuple:
'''simple docstring'''
lowerCamelCase__: List[str] =32
lowerCamelCase__: Optional[Any] =embedder_hidden_size
# image encoding components
lowerCamelCase__: Dict =CLIPImageProcessor(crop_size=32 , size=32)
torch.manual_seed(0)
lowerCamelCase__: str =CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=_snake_case , projection_dim=_snake_case , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ))
# regular denoising components
torch.manual_seed(0)
lowerCamelCase__: str =StableUnCLIPImageNormalizer(embedding_dim=_snake_case)
lowerCamelCase__: Union[str, Any] =DDPMScheduler(beta_schedule="squaredcos_cap_v2")
torch.manual_seed(0)
lowerCamelCase__: str =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
torch.manual_seed(0)
lowerCamelCase__: int =CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_snake_case , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ))
torch.manual_seed(0)
lowerCamelCase__: str =UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_snake_case , layers_per_block=1 , upcast_attention=_snake_case , use_linear_projection=_snake_case , )
torch.manual_seed(0)
lowerCamelCase__: Dict =DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=_snake_case , steps_offset=1 , )
torch.manual_seed(0)
lowerCamelCase__: Tuple =AutoencoderKL()
lowerCamelCase__: Optional[int] ={
# image encoding components
"feature_extractor": feature_extractor,
"image_encoder": image_encoder.eval(),
# image noising components
"image_normalizer": image_normalizer.eval(),
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder.eval(),
"unet": unet.eval(),
"scheduler": scheduler,
"vae": vae.eval(),
}
return components
    def get_dummy_inputs(self , device , seed=0 , pil_image=True ):
        '''simple docstring'''
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        input_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed )).to(device )
        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0 , 1 )
            input_image = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image )[0]
        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
@skip_mps
def SCREAMING_SNAKE_CASE_ (self : Any) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: Optional[int] ="cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__: int =self.get_dummy_components()
lowerCamelCase__: Optional[int] =StableUnCLIPImgaImgPipeline(**_snake_case)
lowerCamelCase__: Union[str, Any] =sd_pipe.to(_snake_case)
sd_pipe.set_progress_bar_config(disable=_snake_case)
lowerCamelCase__: Optional[Any] =self.get_dummy_inputs(_snake_case)
inputs.update({"image_embeds": None})
lowerCamelCase__: Dict =sd_pipe(**_snake_case).images
lowerCamelCase__: Optional[int] =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase__: List[str] =np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: List[str] =torch_device in ["cpu", "mps"]
self._test_attention_slicing_forward_pass(test_max_difference=_snake_case)
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=_snake_case)
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->int:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=_snake_case)
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: List[Any] =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png")
lowerCamelCase__: Optional[int] =load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy")
lowerCamelCase__: List[Any] =StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa)
pipe.to(_snake_case)
pipe.set_progress_bar_config(disable=_snake_case)
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCamelCase__: List[Any] =torch.Generator(device="cpu").manual_seed(0)
lowerCamelCase__: Tuple =pipe(_snake_case , "anime turle" , generator=_snake_case , output_type="np")
lowerCamelCase__: Optional[Any] =output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_snake_case , _snake_case)
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->str:
'''simple docstring'''
lowerCamelCase__: str =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png")
lowerCamelCase__: Tuple =load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy")
lowerCamelCase__: Dict =StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa)
pipe.to(_snake_case)
pipe.set_progress_bar_config(disable=_snake_case)
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCamelCase__: int =torch.Generator(device="cpu").manual_seed(0)
lowerCamelCase__: Union[str, Any] =pipe(_snake_case , "anime turle" , generator=_snake_case , output_type="np")
lowerCamelCase__: str =output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_snake_case , _snake_case)
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->int:
'''simple docstring'''
lowerCamelCase__: List[str] =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png")
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCamelCase__: Tuple =StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa)
lowerCamelCase__: str =pipe.to(_snake_case)
pipe.set_progress_bar_config(disable=_snake_case)
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCamelCase__: List[str] =pipe(
_snake_case , "anime turtle" , num_inference_steps=2 , output_type="np" , )
lowerCamelCase__: Tuple =torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 10 |
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
def rename_keys( state_dict ):
    """simple docstring"""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("""module.encoder""" ):
            key = key.replace("""module.encoder""" , """glpn.encoder""" )
        if key.startswith("""module.decoder""" ):
            key = key.replace("""module.decoder""" , """decoder.stages""" )
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("""patch_embed""" ) + len("""patch_embed""" )]
            key = key.replace(F'patch_embed{idx}' , F'patch_embeddings.{int(idx)-1}' )
        if "norm" in key:
            key = key.replace("""norm""" , """layer_norm""" )
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )]
            key = key.replace(F'layer_norm{idx}' , F'layer_norm.{int(idx)-1}' )
        if "layer_norm1" in key:
            key = key.replace("""layer_norm1""" , """layer_norm_1""" )
        if "layer_norm2" in key:
            key = key.replace("""layer_norm2""" , """layer_norm_2""" )
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("""block""" ) + len("""block""" )]
            key = key.replace(F'block{idx}' , F'block.{int(idx)-1}' )
        if "attn.q" in key:
            key = key.replace("""attn.q""" , """attention.self.query""" )
        if "attn.proj" in key:
            key = key.replace("""attn.proj""" , """attention.output.dense""" )
        if "attn" in key:
            key = key.replace("""attn""" , """attention.self""" )
        if "fc1" in key:
            key = key.replace("""fc1""" , """dense1""" )
        if "fc2" in key:
            key = key.replace("""fc2""" , """dense2""" )
        if "linear_pred" in key:
            key = key.replace("""linear_pred""" , """classifier""" )
        if "linear_fuse" in key:
            key = key.replace("""linear_fuse.conv""" , """linear_fuse""" )
            key = key.replace("""linear_fuse.bn""" , """batch_norm""" )
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("""linear_c""" ) + len("""linear_c""" )]
            key = key.replace(F'linear_c{idx}' , F'linear_c.{int(idx)-1}' )
        if "bot_conv" in key:
            key = key.replace("""bot_conv""" , """0.convolution""" )
        if "skip_conv1" in key:
            key = key.replace("""skip_conv1""" , """1.convolution""" )
        if "skip_conv2" in key:
            key = key.replace("""skip_conv2""" , """2.convolution""" )
        if "fusion1" in key:
            key = key.replace("""fusion1""" , """1.fusion""" )
        if "fusion2" in key:
            key = key.replace("""fusion2""" , """2.fusion""" )
        if "fusion3" in key:
            key = key.replace("""fusion3""" , """3.fusion""" )
        if "fusion" in key and "conv" in key:
            key = key.replace("""conv""" , """convolutional_layer""" )
        if key.startswith("""module.last_layer_depth""" ):
            key = key.replace("""module.last_layer_depth""" , """head.head""" )
        new_state_dict[key] = value
    return new_state_dict
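# Illustrative example of the renaming above (assumed checkpoint key):
#   "module.encoder.block1.0.attn.q.weight"
#     -> "glpn.encoder.block.0.0.attention.self.query.weight"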
def read_in_k_v( state_dict , config ):
    """simple docstring"""
    for i in range(config.num_encoder_blocks ):
        for j in range(config.depths[i] ):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(F'glpn.encoder.block.{i}.{j}.attention.self.kv.weight' )
            kv_bias = state_dict.pop(F'glpn.encoder.block.{i}.{j}.attention.self.kv.bias' )
            # next, add keys and values (in that order) to the state dict
            state_dict[F'glpn.encoder.block.{i}.{j}.attention.self.key.weight'] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[F'glpn.encoder.block.{i}.{j}.attention.self.key.bias'] = kv_bias[: config.hidden_sizes[i]]
            state_dict[F'glpn.encoder.block.{i}.{j}.attention.self.value.weight'] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[F'glpn.encoder.block.{i}.{j}.attention.self.value.bias'] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    """simple docstring"""
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def convert_glpn_checkpoint( checkpoint_path , pytorch_dump_folder_path , push_to_hub=False , model_name=None ):
    """simple docstring"""
    config = GLPNConfig(hidden_sizes=[64, 1_28, 3_20, 5_12] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image , return_tensors="""pt""" ).pixel_values
    logger.info("""Converting model...""" )
    # load original state dict
    state_dict = torch.load(checkpoint_path , map_location=torch.device("""cpu""" ) )
    # rename keys
    state_dict = rename_keys(state_dict )
    # key and value matrices need special treatment
    read_in_k_v(state_dict , config )
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # forward pass
    outputs = model(pixel_values )
    predicted_depth = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4_147, 4.0_873, 4.0_673], [3.7_890, 3.2_881, 3.1_525], [3.7_674, 3.5_423, 3.4_913]] )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4_291, 2.7_865, 2.5_151], [3.2_841, 2.7_021, 2.3_502], [3.1_147, 2.4_625, 2.2_481]] )
        else:
            raise ValueError(F'Unknown model name: {model_name}' )
        expected_shape = torch.Size([1, 4_80, 6_40] )
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3] , expected_slice , atol=1E-4 )
        print("""Looks ok!""" )
    # finally, push to hub if required
    if push_to_hub:
        logger.info("""Pushing model and image processor to the hub...""" )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=True , )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=True , )
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
A__ = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 82 | 0 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get( k ):
    return getitem, k
def _set( k , v ):
    return setitem, k, v
def _del( k ):
    return delitem, k
def _run_operation( obj , fun , *args ):
    try:
        return fun(obj , *args ), None
    except Exception as e:
        return None, e
UpperCAmelCase__ : Any = (
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
)
UpperCAmelCase__ : Optional[int] = [
_set('key_a', 'val_a'),
_set('key_a', 'val_b'),
]
UpperCAmelCase__ : int = [
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
_del('key_a'),
_del('key_b'),
_set('key_a', 'val_a'),
_del('key_a'),
]
UpperCAmelCase__ : Any = [
_get('key_a'),
_del('key_a'),
_set('key_a', 'val_a'),
_del('key_a'),
_del('key_a'),
_get('key_a'),
]
UpperCAmelCase__ : Tuple = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
UpperCAmelCase__ : Optional[int] = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('key_a', 'val_b'),
]
@pytest.mark.parametrize(
'''operations''' , (
pytest.param(_add_items , id='''add items''' ),
pytest.param(_overwrite_items , id='''overwrite items''' ),
pytest.param(_delete_items , id='''delete items''' ),
pytest.param(_access_absent_items , id='''access absent items''' ),
pytest.param(_add_with_resize_up , id='''add with resize up''' ),
pytest.param(_add_with_resize_down , id='''add with resize down''' ),
) , )
def test_hash_map_is_the_same_as_dict( operations ):
    my = HashMap(initial_block_size=4 )
    py = {}
    for _, (fun, *args) in enumerate(operations ):
        my_res , my_exc = _run_operation(my , fun , *args )
        py_res , py_exc = _run_operation(py , fun , *args )
        assert my_res == py_res
        assert str(my_exc ) == str(py_exc )
        assert set(py ) == set(my )
        assert len(py ) == len(my )
        assert set(my.items() ) == set(py.items() )
def lowerCamelCase__ ( ) -> Dict:
def is_public(a ) -> bool:
return not name.startswith('''_''' )
_A: Dict = {name for name in dir({} ) if is_public(a )}
_A: str = {name for name in dir(HashMap() ) if is_public(a )}
assert dict_public_names > hash_public_names
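

# Illustrative sketch (not part of the original tests): each helper above packs
# an operation into a (callable, *args) tuple that _run_operation applies to
# either a HashMap or a plain dict, e.g.:
#
#   my = HashMap(initial_block_size=4)
#   fun, *args = _set('key_a', 'val_a')   # (setitem, 'key_a', 'val_a')
#   _run_operation(my, fun, *args)        # same effect as my['key_a'] = 'val_a'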
| 301 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase ):

    def __init__( self , parent , batch_size=3 , image_size=3_2 , num_channels=3 , embeddings_size=1_0 , hidden_sizes=[1_0, 2_0, 3_0, 4_0] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes )

    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = self.get_config()
        return config, pixel_values

    def get_config( self ):
        return RegNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )

    def create_and_check_model( self , config , pixel_values ):
        model = FlaxRegNetModel(config=config )
        result = model(pixel_values )

        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )

    def create_and_check_for_image_classification( self , config , pixel_values ):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin , unittest.TestCase ):

    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp( self ) -> None:
        self.model_tester = FlaxRegNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RegNetConfig , has_text_modality=False )

    def test_config( self ):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties( self ):
        return

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    @unittest.skip(reason='''RegNet does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
        pass

    @unittest.skip(reason='''RegNet does not support input and output embeddings''' )
    def test_model_common_attributes( self ):
        pass

    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict , config , model_class )

    def test_jit_compilation( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )

                @jax.jit
                def model_jitted(pixel_values , **kwargs ):
                    return model(pixel_values=pixel_values , **kwargs )

                with self.subTest('''JIT Enabled''' ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()

                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()

                self.assertEqual(len(outputs ) , len(jitted_outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None

    @slow
    def test_inference_image_classification_head( self ):
        model = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''np''' )

        outputs = model(**inputs )

        # verify the logits
        expected_shape = (1, 1_0_0_0)
        self.assertEqual(outputs.logits.shape , expected_shape )

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836] )

        self.assertTrue(jnp.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
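

# Usage sketch (assumes network access to the facebook/regnet-y-040 checkpoint;
# mirrors the slow integration test above):
#
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   inputs = processor(images=prepare_img(), return_tensors="np")
#   logits = model(**inputs).logits   # shape (1, 1000)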
| 301 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_token_type_ids=True , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model( self , config , input_ids , head_mask , token_type_ids , *args ):
        model = OpenAIGPTModel(config=config )
        model.to(torch_device )
        model.eval()

        result = model(input_ids , token_type_ids=token_type_ids , head_mask=head_mask )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )

        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_lm_head_model( self , config , input_ids , head_mask , token_type_ids , *args ):
        model = OpenAIGPTLMHeadModel(config )
        model.to(torch_device )
        model.eval()

        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_double_lm_head_model( self , config , input_ids , head_mask , token_type_ids , *args ):
        model = OpenAIGPTDoubleHeadsModel(config )
        model.to(torch_device )
        model.eval()

        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_openai_gpt_for_sequence_classification( self , config , input_ids , head_mask , token_type_ids , *args ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config )
        model.to(torch_device )
        model.eval()

        sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        result = model(input_ids , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            '''input_ids''': input_ids,
            '''token_type_ids''': token_type_ids,
            '''head_mask''': head_mask,
        }
return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict['''labels'''] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=torch_device , )
                inputs_dict['''input_ids'''] = inputs_dict['''labels''']
                inputs_dict['''token_type_ids'''] = inputs_dict['''labels''']
                inputs_dict['''mc_token_ids'''] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=torch_device , )
                inputs_dict['''mc_labels'''] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict

    def setUp( self ):
        self.model_tester = OpenAIGPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OpenAIGPTConfig , n_embd=3_7 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs )

    def test_openai_gpt_lm_head_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs )

    def test_openai_gpt_double_lm_head_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs )

    def test_openai_gpt_classification_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt( self ):
        model = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
        model.to(torch_device )
        input_ids = torch.tensor([[4_8_1, 4_7_3_5, 5_4_4]] , dtype=torch.long , device=torch_device )  # the president is
        expected_output_ids = [
4_8_1,
4_7_3_5,
5_4_4,
2_4_6,
9_6_3,
8_7_0,
7_6_2,
2_3_9,
2_4_4,
4_0_4_7_7,
2_4_4,
2_4_9,
7_1_9,
8_8_1,
4_8_7,
5_4_4,
2_4_0,
2_4_4,
6_0_3,
4_8_1,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
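

# Usage sketch (assumes the openai-gpt checkpoint is downloadable; mirrors the
# generation test above; the tokenizer class is an assumption):
#
#   from transformers import OpenAIGPTTokenizer
#   tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
#   model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
#   input_ids = torch.tensor([tokenizer.encode("the president is")])
#   output_ids = model.generate(input_ids, do_sample=False)   # greedy decoding
#   print(tokenizer.decode(output_ids[0]))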
| 72 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)


@dataclass(frozen=True )
class InputExample:
    """A single training/test example for the HANS dataset."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True )
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class HansDataset(Dataset ):
    features: List[InputFeatures]

    def __init__( self , data_dir: str , tokenizer: PreTrainedTokenizer , task: str , max_seq_length: Optional[int] = None , overwrite_cache=False , evaluate: bool = False , ):
        processor = hans_processors[task]()

        cached_features_file = os.path.join(
            data_dir , "cached_{}_{}_{}_{}".format(
                "dev" if evaluate else "train" , tokenizer.__class__.__name__ , str(max_seq_length ) , task , ) , )
        label_list = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not overwrite_cache:
                logger.info(f"""Loading features from cached file {cached_features_file}""" )
                self.features = torch.load(cached_features_file )
            else:
                logger.info(f"""Creating features from dataset file at {data_dir}""" )
                examples = (
                    processor.get_dev_examples(data_dir ) if evaluate else processor.get_train_examples(data_dir )
                )
                logger.info("Training examples: %s" , len(examples ) )
                self.features = hans_convert_examples_to_features(examples , label_list , max_seq_length , tokenizer )
                logger.info("Saving features into cached file %s" , cached_features_file )
                torch.save(self.features , cached_features_file )

    def __len__( self ):
        return len(self.features )

    def __getitem__( self , i ):
        return self.features[i]

    def get_labels( self ):
        return self.label_list
if is_tf_available():
import tensorflow as tf
class TFHansDataset:
    features: List[InputFeatures]

    def __init__( self , data_dir: str , tokenizer: PreTrainedTokenizer , task: str , max_seq_length: Optional[int] = 128 , overwrite_cache=False , evaluate: bool = False , ):
        processor = hans_processors[task]()
        label_list = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        examples = processor.get_dev_examples(data_dir ) if evaluate else processor.get_train_examples(data_dir )
        self.features = hans_convert_examples_to_features(examples , label_list , max_seq_length , tokenizer )

        def gen():
            for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="convert examples to features" ):
                if ex_index % 10_000 == 0:
                    logger.info("Writing example %d of %d" % (ex_index, len(examples )) )

                yield (
                    {
                        "example_id": 0,
                        "input_ids": ex.input_ids,
                        "attention_mask": ex.attention_mask,
                        "token_type_ids": ex.token_type_ids,
                    },
                    ex.label,
                )

        self.dataset = tf.data.Dataset.from_generator(
            gen , (
                {
                    "example_id": tf.int32,
                    "input_ids": tf.int32,
                    "attention_mask": tf.int32,
                    "token_type_ids": tf.int32,
                },
                tf.int64,
            ) , (
                {
                    "example_id": tf.TensorShape([] ),
                    "input_ids": tf.TensorShape([None, None] ),
                    "attention_mask": tf.TensorShape([None, None] ),
                    "token_type_ids": tf.TensorShape([None, None] ),
                },
                tf.TensorShape([] ),
            ) , )

    def get_dataset( self ):
        return self.dataset

    def __len__( self ):
        return len(self.features )

    def __getitem__( self , i ):
        return self.features[i]

    def get_labels( self ):
        return self.label_list
class HansProcessor(DataProcessor ):
    """Processor for the HANS data set."""

    def get_train_examples( self , data_dir ):
        return self._create_examples(self._read_tsv(os.path.join(data_dir , "heuristics_train_set.txt" ) ) , "train" )

    def get_dev_examples( self , data_dir ):
        return self._create_examples(self._read_tsv(os.path.join(data_dir , "heuristics_evaluation_set.txt" ) ) , "dev" )

    def get_labels( self ):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples( self , lines , set_type ):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines ):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex" ) else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid , text_a=text_a , text_b=text_b , label=label , pairID=pairID ) )
        return examples


def hans_convert_examples_to_features(examples , label_list , max_length , tokenizer , ):
    """
    Loads a data file into a list of ``InputFeatures``.
    """
    label_map = {label: i for i, label in enumerate(label_list )}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples ) , desc="convert examples to features" ):
        if ex_index % 10_000 == 0:
            logger.info("Writing example %d" % (ex_index) )

        inputs = tokenizer(
            example.text_a , example.text_b , add_special_tokens=True , max_length=max_length , padding="max_length" , truncation=True , return_overflowing_tokens=True , )

        label = label_map[example.label] if example.label in label_map else 0

        pairID = int(example.pairID )

        features.append(InputFeatures(**inputs , label=label , pairID=pairID ) )

    for i, example in enumerate(examples[:5] ):
        logger.info("*** Example ***" )
        logger.info(f"""guid: {example}""" )
        logger.info(f"""features: {features[i]}""" )

    return features


hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
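

# Usage sketch (hypothetical paths and checkpoint; shows how the dataset class
# and feature converter above fit together):
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   dataset = HansDataset(
#       data_dir="./hans", tokenizer=tokenizer, task="hans", max_seq_length=128
#   )
#   features = dataset[0]   # an InputFeatures with input_ids / label / pairID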
| 130 | 0 |
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

require_version("pytorch_lightning>=1.0.4")

MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeq2SeqLM,
    "translation": AutoModelForSeq2SeqLM,
}


# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule,             # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"


class BaseTransformer(pl.LightningModule):
    def __init__( self , hparams , num_labels=None , mode="base" , config=None , tokenizer=None , model=None , **config_kwargs ):
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading

        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path, **({"num_labels": num_labels} if num_labels is not None else {}), cache_dir=cache_dir, **config_kwargs, )
        else:
            self.config: PretrainedConfig = config

        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path, cache_dir=cache_dir, )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path, from_tf=bool(".ckpt" in self.hparams.model_name_or_path), config=self.config, cache_dir=cache_dir, )
        else:
            self.model = model
    def load_hf_checkpoint( self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler( self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps())
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler

    def configure_optimizers( self):
        """Prepare optimizer and schedule (linear warmup and decay)"""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named paramters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False)

        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon)
        self.opt = optimizer

        scheduler = self.get_lr_scheduler()

        return [optimizer], [scheduler]

    def test_step( self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end( self, outputs):
        return self.validation_end(outputs)

    def total_steps( self):
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup( self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader( self, type_path, batch_size, shuffle = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader( self):
        return self.train_loader

    def val_dataloader( self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader( self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file( self, mode):
        return os.path.join(
            self.hparams.data_dir, "cached_{}_{}_{}".format(
                mode, list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(), str(self.hparams.max_seq_length), ), )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint( self, checkpoint):
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name")
        parser.add_argument(
            "--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name", )
        parser.add_argument(
            "--cache_dir", default=str(Path(root_dir).parent / "test_run" / "cache"), type=str, help="Where do you want to store the pre-trained models downloaded from huggingface.co", )
        parser.add_argument(
            "--encoder_layerdrop", type=float, help="Encoder layer dropout probability (Optional). Goes into model.config", )
        parser.add_argument(
            "--decoder_layerdrop", type=float, help="Decoder layer dropout probability (Optional). Goes into model.config", )
        parser.add_argument(
            "--dropout", type=float, help="Dropout probability (Optional). Goes into model.config", )
        parser.add_argument(
            "--attention_dropout", type=float, help="Attention dropout probability (Optional). Goes into model.config", )
        parser.add_argument("--learning_rate", default=5E-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler", default="linear", choices=arg_to_scheduler_choices, metavar=arg_to_scheduler_metavar, type=str, help="Learning rate scheduler", )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1E-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    def on_sanity_check_start( self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    # check whether new added model parameters are differentiable
    def on_after_backward( self, trainer, pl_module):
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)


class LoggingCallback(pl.Callback):
    def on_batch_end( self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end( self, trainer, pl_module):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end( self, trainer, pl_module):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser , root_dir ):
    parser.add_argument(
        "--output_dir" , default=str(Path(root_dir ).parent / "test_run" / "model_checkpoints" ) , type=str , help="The output directory where the model predictions and checkpoints will be written." , )
    parser.add_argument(
        "--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , )
    parser.add_argument(
        "--fp16_opt_level" , type=str , default="O2" , help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ) , )
    parser.add_argument("--n_tpu_cores" , dest="tpu_cores" , type=int )
    parser.add_argument("--max_grad_norm" , dest="gradient_clip_val" , default=1.0 , type=float , help="Max gradient norm" )
    parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." )
    parser.add_argument("--do_predict" , action="store_true" , help="Whether to run predictions on the test set." )
    parser.add_argument(
        "--gradient_accumulation_steps" , dest="accumulate_grad_batches" , type=int , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , )
    parser.add_argument("--seed" , type=int , default=42 , help="random seed for initialization" )
    parser.add_argument(
        "--data_dir" , default=str(Path(root_dir ).parent / "test_run" / "dummy-train-data" ) , type=str , help="The input data dir. Should contain the training files for the CoNLL-2003 NER task." , )


def generic_train(
    model , args , early_stopping_callback=None , logger=True , extra_callbacks=[] , checkpoint_callback=None , logging_callback=None , **extra_train_kwargs ):
    pl.seed_everything(args.seed )

    # init model
    odir = Path(model.hparams.output_dir )
    odir.mkdir(exist_ok=True )

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir , prefix="checkpoint" , monitor="val_loss" , mode="min" , save_top_k=1 )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback )
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}
    if args.fp16:
        train_params["precision"] = 16
    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"
    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args , weights_summary=None , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=logger , val_check_interval=1 , num_sanity_val_steps=2 , **train_params , )

    if args.do_train:
        trainer.fit(model )

    else:
        print("RAG modeling tests with new set functions successfuly executed!" )
    return trainer
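

# Usage sketch (hypothetical subclass of the LightningModule above; shows the
# intended wiring of the helpers):
#
#   parser = argparse.ArgumentParser()
#   add_generic_args(parser, os.getcwd())
#   MyTaskModule.add_model_specific_args(parser, os.getcwd())
#   args = parser.parse_args()
#   model = MyTaskModule(args)            # subclass implementing get_dataloader()
#   trainer = generic_train(model, args)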
| 300 |
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': TFMobileBertModel,
            'fill-mask': TFMobileBertForMaskedLM,
            'question-answering': TFMobileBertForQuestionAnswering,
            'text-classification': TFMobileBertForSequenceClassification,
            'token-classification': TFMobileBertForTokenClassification,
            'zero-shot': TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def _prepare_for_class( self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict

class TFMobileBertModelTester(object):
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, embedding_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.embedding_size = embedding_size
    def prepare_config_and_inputs( self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = MobileBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, embedding_size=self.embedding_size, )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_mobilebert_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFMobileBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFMobileBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFMobileBertForNextSentencePrediction(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFMobileBertForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(
            result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFMobileBertForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFMobileBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mobilebert_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFMobileBertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFMobileBertForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common( self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict

    def setUp( self):
        self.model_tester = TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config( self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained( self):
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_masked_lm( self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 3_0522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5_919_547, -9.248_295, -9.645_256],
                    [-6.7_306_175, -6.440_284, -6.6_052_837],
                    [-7.2_743_506, -6.7_847_915, -6.024_673],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1E-4)
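

# Usage sketch (assumes the google/mobilebert-uncased checkpoint; mirrors the
# integration test above):
#
#   model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
#   input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
#   prediction_logits = model(input_ids)[0]   # shape (1, 6, 30522)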
| 300 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool ):
    default_checkpoint = """openai/whisper-base"""
    description = (
        """This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
        """transcribed text."""
    )
    name = """transcriber"""
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["""audio"""]
    outputs = ["""text"""]

    def encode( self , audio ):
        return self.pre_processor(audio , return_tensors="pt" ).input_features

    def forward( self , inputs ):
        return self.model.generate(inputs=inputs )

    def decode( self , outputs ):
        return self.pre_processor.batch_decode(outputs , skip_special_tokens=True )[0]
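

# Usage sketch (the tool chains encode -> forward -> decode; `audio` is a raw
# waveform array accepted by WhisperProcessor, supplied by the caller):
#
#   tool = SpeechToTextTool()
#   transcription = tool(audio)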
| 107 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'bert_for_seq_generation': (
            'https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'bert_for_seq_generation': 512}


class BertGenerationTokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , sep_token="<::::>" , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sep_token=sep_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )

    @property
    def vocab_size( self ) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab( self ) -> Dict:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__( self , d ):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : str ) -> List[str]:
return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )
def __UpperCAmelCase ( self : str , __lowerCamelCase : Union[str, Any] ) -> int:
return self.sp_model.piece_to_id(__lowerCamelCase )
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : List[str] ) -> Any:
a = self.sp_model.IdToPiece(__lowerCamelCase )
return token
def __UpperCAmelCase ( self : Any , __lowerCamelCase : Dict ) -> Any:
a = []
a = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__lowerCamelCase ) + token
a = []
else:
current_sub_tokens.append(__lowerCamelCase )
out_string += self.sp_model.decode(__lowerCamelCase )
return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
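
# A minimal usage sketch (hypothetical path: requires a trained SentencePiece
# model on disk, which this snippet does not ship):
#
#     tokenizer = BertGenerationTokenizer(vocab_file="spiece.model")
#     ids = tokenizer("Hello world")["input_ids"]
#     print(tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(ids)))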
| 107 | 1 |
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    """Track the peak RSS memory of the current process on a background thread."""

    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1

        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)

            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak
cpu_peak_tracker = PeakCPUMemory()
def start_measure():
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures
def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem (in MiB)
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem (in MiB)
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures
def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
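
# A minimal usage sketch (the matmul is an arbitrary stand-in workload; the
# GPU fields require a CUDA device):
#
#     start = start_measure()
#     _ = torch.randn(1024, 1024) @ torch.randn(1024, 1024)
#     log_measures(end_measure(start), "matmul benchmark")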
| 241 |
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
# Folders holding the YOLO-format label files, the images, and the output
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str):
    """Pair every non-empty YOLO label file with its image path."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1):
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:  # horizontal flip: mirror x_center
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:  # vertical flip: mirror y_center
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
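
# A quick worked example of the flip arithmetic above: YOLO boxes are
# (class, x_center, y_center, width, height) normalized to [0, 1], so a
# horizontal flip only mirrors x_center:
#
#     box = [0, 0.25, 0.40, 0.10, 0.20]
#     flipped = [box[0], 1 - box[1], box[2], box[3], box[4]]
#     assert flipped == [0, 0.75, 0.40, 0.10, 0.20]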
def random_chars(number_char: int = 32) -> str:
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 241 | 1 |
'''simple docstring'''
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
class ByT5Tokenizer(PreTrainedTokenizer):
    """Byte-level tokenizer: every UTF-8 byte is its own token."""

    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", extra_ids=125, additional_special_tokens=None, **kwargs
    ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens"
                )

        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits

        # define special tokens dict
        self.special_tokens_encoder: Dict[str, int] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder: Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}
    @property
    def vocab_size(self) -> int:
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1
    def _tokenize(self, text: str) -> List[str]:
        """Split a string into byte tokens, one per UTF-8 byte."""
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of byte tokens back into a single string."""
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.added_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string

    # ByT5Tokenizer has no vocab file, so there is nothing to save
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        return ()
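
# A minimal, self-contained usage sketch: ByT5 needs no vocab file since every
# UTF-8 byte is its own token (byte value offset by the 3 special tokens).
if __name__ == "__main__":
    tok = ByT5Tokenizer()
    ids = tok("hi!")["input_ids"]
    print(ids)  # [107, 108, 36, 1] -> 'h', 'i', '!' bytes (+3 offset) plus </s>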
| 47 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(
        self, images, text=None, text_pair=None, boxes=None, word_labels=None,
        add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None,
        stride: int = 0, pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None, **kwargs,
    ) -> BatchEncoding:
        # verify input: OCR and user-provided boxes/labels are mutually exclusive
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding,
            truncation=truncation, max_length=max_length, stride=stride,
            pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
            return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
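
# A minimal usage sketch (assumes the public microsoft/layoutlmv3-base
# checkpoint and a local image; OCR mode additionally requires Tesseract):
#
#     from transformers import AutoProcessor
#     from PIL import Image
#
#     processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base")
#     encoding = processor(Image.open("form.png").convert("RGB"), return_tensors="pt")
#     print(encoding.keys())  # input_ids, attention_mask, bbox, pixel_values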
| 87 | 0 |
"""simple docstring"""
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length

        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        # multi-hot label vector over the dataset's genre labels
        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
        ]
    )
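
# A minimal usage sketch (hypothetical jsonl path; `tokenizer` is any
# BERT-style tokenizer exposing `encode`):
#
#     dataset = JsonlDataset("dev.jsonl", tokenizer, get_image_transforms(),
#                            get_mmimdb_labels(), max_seq_length=512)
#     batch = collate_fn([dataset[0], dataset[1]])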
| 226 |
"""simple docstring"""
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data):
        self.data = data
        self.right = None
        self.left = None
def build_tree() -> TreeNode:
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise  # unreachable: the input loop always returns once entry stops
def pre_order(node: TreeNode) -> None:
    """Root -> left subtree -> right subtree."""
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    """Left subtree -> root -> right subtree."""
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    """Left subtree -> right subtree -> root."""
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")
def level_order(node: TreeNode) -> None:
    """Breadth-first traversal using a FIFO queue."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    """Breadth-first traversal, printing one tree level per line."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)
def pre_order_iter(node: TreeNode) -> None:
    """Iterative pre-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    """Iterative in-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right
def post_order_iter(node: TreeNode) -> None:
    """Iterative post-order traversal using two stacks."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")


def prompt(s: str = "", width=50, char="*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("""Binary Tree Traversals"""))
    node = build_tree()
print(prompt("""Pre Order Traversal"""))
pre_order(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal"""))
in_order(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal"""))
post_order(node)
print(prompt() + """\n""")
print(prompt("""Level Order Traversal"""))
level_order(node)
print(prompt() + """\n""")
print(prompt("""Actual Level Order Traversal"""))
level_order_actual(node)
print("""*""" * 50 + """\n""")
print(prompt("""Pre Order Traversal - Iteration Version"""))
pre_order_iter(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal - Iteration Version"""))
in_order_iter(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal - Iteration Version"""))
post_order_iter(node)
print(prompt())
| 226 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}
class RemBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=True,
        bos_token="[CLS]", eos_token="[SEP]", unk_token="[UNK]", sep_token="[SEP]",
        pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents,
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None  # SentencePiece processors are not picklable
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
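
# A minimal usage sketch (hypothetical local path; needs the SentencePiece
# model shipped with a RemBERT checkpoint):
#
#     tok = RemBertTokenizer(vocab_file="sentencepiece.model")
#     print(tok.tokenize("RemBERT uses SentencePiece."))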
| 22 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the ScoreSdeVeScheduler's step functions."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """Variance-exploding stochastic differential equation (SDE) scheduler."""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample
    def set_timesteps(
        self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None
    ):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)
    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])
    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )
    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)
    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas have the same device as the original samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
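
# A minimal predictor-corrector sampling sketch (`score_model` is a
# hypothetical score network; shapes and step counts are illustrative):
#
#     scheduler = ScoreSdeVeScheduler()
#     scheduler.set_timesteps(1000)
#     scheduler.set_sigmas(1000)
#     sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         for _ in range(scheduler.config.correct_steps):
#             sample = scheduler.step_correct(score_model(sample, t), sample).prev_sample
#         sample = scheduler.step_pred(score_model(sample, t), t, sample).prev_sample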
| 300 | 0 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
raise Exception("requires fairseq >= 0.9.0")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """Copy/paste/tweak a fairseq BART checkpoint into the HF structure."""
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config", default=None, type=str, help="Which huggingface architecture to use: bart-large-xsum"
)
    args = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 310 |
"""simple docstring"""
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent):
    """Standard BFS: fill `parent` with the augmenting path and report whether t is reachable."""
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def mincut(graph, source, sink):
    """Ford-Fulkerson max flow; return the edges crossing the minimum s-t cut."""
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    # saturated edges that carried flow form the minimum cut
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))

    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
| 310 | 1 |
"""simple docstring"""
import numpy as np
def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    # ELU is the identity for positive inputs and alpha * (exp(x) - 1) otherwise
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))
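
# Example values (the negative entries follow alpha * (exp(x) - 1)):
#
#     print(exponential_linear_unit(np.array([2.3, 0.6, -2, -3.8]), alpha=0.3))
#     # [ 2.3         0.6        -0.25939942 -0.29328877]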
if __name__ == "__main__":
import doctest
doctest.testmod()
| 16 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self, vocab_size=50244, hidden_size=768, d_kv=64, d_ff=2048, num_layers=12, num_heads=12,
        relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1,
        layer_norm_epsilon=1e-6, initializer_factor=1.0, dense_act_fn="gelu_new",
        decoder_start_token_id=0, use_cache=False, pad_token_id=0, eos_token_id=1,
        tie_word_embeddings=False, is_decoder=True, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self, hidden_size=768, patch_embed_hidden_size=768, d_ff=2048, d_kv=64,
        num_hidden_layers=12, num_attention_heads=12, dense_act_fn="gelu_new",
        layer_norm_eps=1e-6, dropout_rate=0.0, attention_dropout=0.0, initializer_range=1e-10,
        initializer_factor=1.0, seq_len=4096, relative_attention_num_buckets=32,
        relative_attention_max_distance=128, **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self, text_config=None, vision_config=None, initializer_factor=1.0, initializer_range=0.02,
        is_vqa=False, tie_word_embeddings=False, is_encoder_decoder=True, **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 186 | 0 |
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester(object):
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"

        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_tmpdirname = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)
    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**self.inputs_dict, **kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")
    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
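
# A minimal usage sketch inside a model's test file (BertConfig is only an
# example; any PretrainedConfig subclass works):
#
#     class BertConfigTest(unittest.TestCase):
#         def setUp(self):
#             self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
#
#         def test_config(self):
#             self.config_tester.run_common_tests()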
| 54 |
"""simple docstring"""
import qiskit
def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)

    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)

    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))

    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F'''Total count for various states are: {quantum_entanglement(3)}''')
| 54 | 1 |
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the "Cited by" count scraped from a Google Scholar result page."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 339 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ) -> None:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ) -> List[Any]:
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        config = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ) -> List[str]:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    test_head_masking = True
    all_model_classes = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ) -> None:
        """simple docstring"""
        self.model_tester = FlaxRoFormerModelTester(self)
@slow
    def test_model_from_pretrained( self ) -> None:
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest( unittest.TestCase ):
@slow
    def test_inference_masked_lm( self ) -> None:
        """simple docstring"""
        model = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base')
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape , expected_shape)
        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]])
        self.assertTrue(jnp.allclose(output[:, :3, :3] , expected_slice , atol=1E-4))
| 339 | 1 |
'''simple docstring'''
def euclidean_gcd(a: int, b: int ) -> int:
    while b:
        a, b = b, a % b
    return a
def euclidean_gcd_recursive(a: int, b: int ) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b )
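# Worked example: euclidean_gcd(48, 18) iterates (48, 18) -> (18, 12) -> (12, 6)
# -> (6, 0) and returns 6; the recursive variant follows the same division chain.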
def main() -> None:
print(F'''euclidean_gcd(3, 5) = {euclidean_gcd(3, 5 )}''' )
print(F'''euclidean_gcd(5, 3) = {euclidean_gcd(5, 3 )}''' )
print(F'''euclidean_gcd(1, 3) = {euclidean_gcd(1, 3 )}''' )
print(F'''euclidean_gcd(3, 6) = {euclidean_gcd(3, 6 )}''' )
print(F'''euclidean_gcd(6, 3) = {euclidean_gcd(6, 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5 )}''' )
print(F'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3 )}''' )
print(F'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6 )}''' )
print(F'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3 )}''' )
if __name__ == "__main__":
main()
| 18 | '''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , mask_ratio=0.6 , scope=None , ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1) ) )
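    # With the defaults above (illustrative arithmetic): num_patches = (30 // 2) ** 2 = 225
    # and seq_length = ceil(0.4 * 226) = 91 visible tokens.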
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        return ViTMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = ViTMAEModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_pretraining( self , config , pixel_values , labels ):
        model = ViTMAEForPreTraining(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ) -> None:
        self.model_tester = ViTMAEModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTMAEConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ) -> None:
        self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
    def test_inputs_embeds( self ) -> None:
pass
    def test_model_common_attributes( self ) -> None:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ) -> None:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_pretraining( self ) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    def check_pt_tf_models( self , tf_model , pt_model , inputs_dict ):
        # make masks reproducible
        np.random.seed(2 )
        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        pt_noise = torch.from_numpy(noise )
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        inputs_dict['''noise'''] = pt_noise
        super().check_pt_tf_models(tf_model , pt_model , inputs_dict )
    def test_save_load( self ) -> None:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2 )
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2 )] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname )
                model = model_class.from_pretrained(tmpdirname )
                model.to(torch_device )
                # make random mask reproducible
                torch.manual_seed(2 )
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1 )] = 0
                max_diff = np.amax(np.abs(out_1 - out_2 ) )
                self.assertLessEqual(max_diff , 1E-5 )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
    def test_determinism( self ) -> None:
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
    def test_save_load_fast_init_from_base( self ) -> None:
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
    def test_save_load_fast_init_to_base( self ) -> None:
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
    def test_model_outputs_equivalence( self ) -> None:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def test_model_is_small( self ) -> None:
pass
@slow
    def test_model_from_pretrained( self ) -> None:
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor( self ):
        return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
    def test_inference_for_pretraining( self ) -> None:
        # make random mask reproducible across the PT and TF model
        np.random.seed(2 )
        model = ViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
        noise = np.random.uniform(size=(1, num_patches) )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs , noise=torch.from_numpy(noise ).to(device=torch_device ) )
        # verify the logits
        expected_shape = torch.Size((1, 196, 768) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(torch_device ) , atol=1E-4 ) )
| 18 | 1 |
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path , checkpoint_path , LORA_PREFIX_UNET , LORA_PREFIX_TEXT_ENCODER , alpha ):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path , torch_dtype=torch.float32 )
    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path )
    visited = []
    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue
        if "text" in key:
            layer_infos = key.split('''.''' )[0].split(LORA_PREFIX_TEXT_ENCODER + '''_''' )[-1].split('''_''' )
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split('''.''' )[0].split(LORA_PREFIX_UNET + '''_''' )[-1].split('''_''' )
            curr_layer = pipeline.unet
        # find the target layer
        temp_name = layer_infos.pop(0 )
        while len(layer_infos ) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name )
                if len(layer_infos ) > 0:
                    temp_name = layer_infos.pop(0 )
                elif len(layer_infos ) == 0:
                    break
            except Exception:
                if len(temp_name ) > 0:
                    temp_name += "_" + layer_infos.pop(0 )
                else:
                    temp_name = layer_infos.pop(0 )
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace('''lora_down''' , '''lora_up''' ) )
            pair_keys.append(key )
        else:
            pair_keys.append(key )
            pair_keys.append(key.replace('''lora_up''' , '''lora_down''' ) )
        # update weight
        if len(state_dict[pair_keys[0]].shape ) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            weight_down = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up , weight_down ).unsqueeze(2 ).unsqueeze(3 )
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32 )
            weight_down = state_dict[pair_keys[1]].to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up , weight_down )
        # update visited list
        for item in pair_keys:
            visited.append(item )
    return pipeline
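# Merge rule implemented above (cf. the --alpha help text below):
#   W = W0 + alpha * (lora_up @ lora_down)
# with 1x1-conv LoRA weights reduced to a plain matrix product via the
# squeeze/unsqueeze calls.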
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_path''', default=None, type=str, required=True, help='''Path to the base model in diffusers format.'''
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--lora_prefix_unet''', default='''lora_unet''', type=str, help='''The prefix of UNet weight in safetensors'''
)
parser.add_argument(
'''--lora_prefix_text_encoder''',
default='''lora_te''',
type=str,
help='''The prefix of text encoder weight in safetensors''',
)
parser.add_argument('''--alpha''', default=0.75, type=float, help='''The merging ratio in W = W0 + alpha * deltaW''')
parser.add_argument(
'''--to_safetensors''', action='''store_true''', help='''Whether to store pipeline in safetensors format or not.'''
)
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
    args = parser.parse_args()
    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha
    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 181 |
'''simple docstring'''
def get_set_bits_count(number: int ) -> int:
    if number < 0:
        raise ValueError('''Input value must be a positive integer''' )
    elif isinstance(number , float ):
        raise TypeError('''Input value must be a \'int\' type''' )
    return bin(number ).count('''1''' )
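# Worked examples (ordinary popcount arithmetic, added for illustration):
#   get_set_bits_count(25) -> bin(25) == '0b11001'  -> 3
#   get_set_bits_count(58) -> bin(58) == '0b111010' -> 4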
if __name__ == "__main__":
import doctest
doctest.testmod()
| 181 | 1 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image , w , h ):
    """simple docstring"""
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]
    if isinstance(image[0] , PIL.Image.Image ):
        image = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] ) )[None, :] for i in image]
        image = np.concatenate(image , axis=0 )
        image = np.array(image ).astype(np.float32 ) / 255.0
        image = image.transpose(0 , 3 , 1 , 2 )
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image )
    elif isinstance(image[0] , torch.Tensor ):
        image = torch.cat(image , dim=0 )
    return image
def slerp(t , v0 , v1 , DOT_THRESHOLD=0.9995 ):
    """Spherical linear interpolation between two (torch or numpy) vectors."""
    inputs_are_torch = False
    if not isinstance(v0 , np.ndarray ):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0 ) * np.linalg.norm(v1 )) )
    if np.abs(dot ) > DOT_THRESHOLD:
        # nearly parallel vectors: fall back to plain linear interpolation
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot )
        sin_theta_0 = np.sin(theta_0 )
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t )
        s0 = np.sin(theta_0 - theta_t ) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1
    if inputs_are_torch:
        v2 = torch.from_numpy(v2 ).to(input_device )
    return v2
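# Minimal sanity sketch (illustrative, not part of the pipeline): interpolating
# halfway between two orthogonal unit vectors with slerp stays on the unit sphere.
#   v = slerp(0.5, np.array([1.0, 0.0]), np.array([0.0, 1.0]))
#   np.allclose(np.linalg.norm(v), 1.0)  # -> True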
def spherical_dist_loss(x , y ):
    """simple docstring"""
    x = F.normalize(x , dim=-1 )
    y = F.normalize(y , dim=-1 )
    return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
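# For unit vectors at angle theta, ||x - y|| / 2 == sin(theta / 2), so this loss
# evaluates to theta**2 / 2: half the squared great-circle (geodesic) distance.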
def set_requires_grad(model , value ):
    """simple docstring"""
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion( DiffusionPipeline ):
    def __init__(self , vae , text_encoder , clip_model , tokenizer , unet , scheduler , feature_extractor , coca_model=None , coca_tokenizer=None , coca_transform=None , ) -> None:
        super().__init__()
        self.register_modules(
            vae=vae , text_encoder=text_encoder , clip_model=clip_model , tokenizer=tokenizer , unet=unet , scheduler=scheduler , feature_extractor=feature_extractor , coca_model=coca_model , coca_tokenizer=coca_tokenizer , coca_transform=coca_transform , )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size , int )
            else feature_extractor.size["""shortest_edge"""]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
        set_requires_grad(self.text_encoder , False )
        set_requires_grad(self.clip_model , False )
    def enable_attention_slicing(self , slice_size = "auto" ):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing(self ):
        self.enable_attention_slicing(None )
    def freeze_vae(self ):
        set_requires_grad(self.vae , False )
    def unfreeze_vae(self ):
        set_requires_grad(self.vae , True )
    def freeze_unet(self ):
        set_requires_grad(self.unet , False )
    def unfreeze_unet(self ):
        set_requires_grad(self.unet , True )
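    # Illustrative numbers for get_timesteps below: with num_inference_steps=50
    # and strength=0.6, init_timestep=30 and t_start=20, so only the last 30
    # scheduler steps are applied to the noised latents.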
    def get_timesteps(self , num_inference_steps , strength , device ):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength ) , num_inference_steps )
        t_start = max(num_inference_steps - init_timestep , 0 )
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents(self , image , timestep , batch_size , dtype , device , generator=None ):
        if not isinstance(image , torch.Tensor ):
            raise ValueError(f'`image` has to be of type `torch.Tensor` but is {type(image )}' )
        image = image.to(device=device , dtype=dtype )
        if isinstance(generator , list ):
            init_latents = [
                self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(batch_size )
            ]
            init_latents = torch.cat(init_latents , dim=0 )
        else:
            init_latents = self.vae.encode(image ).latent_dist.sample(generator )
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size , dim=0 )
        noise = randn_tensor(init_latents.shape , generator=generator , device=device , dtype=dtype )
        # get latents
        init_latents = self.scheduler.add_noise(init_latents , noise , timestep )
        latents = init_latents
        return latents
    def get_image_description(self , image ):
        transformed_image = self.coca_transform(image ).unsqueeze(0 )
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
        return generated.split("""<end_of_text>""" )[0].replace("""<start_of_text>""" , """""" ).rstrip(""" .,""" )
    def get_clip_image_embeddings(self , image , batch_size ):
        clip_image_input = self.feature_extractor.preprocess(image )
        clip_image_features = torch.from_numpy(clip_image_input["""pixel_values"""][0] ).unsqueeze(0 ).to(self.device ).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features )
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=True )
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size , dim=0 )
        return image_embeddings_clip
@torch.enable_grad()
    def cond_fn(self , latents , timestep , index , text_embeddings , noise_pred_original , original_image_embeddings_clip , clip_guidance_scale , ):
        latents = latents.detach().requires_grad_()
        latent_model_input = self.scheduler.scale_model_input(latents , timestep )
        # predict the noise residual
        noise_pred = self.unet(latent_model_input , timestep , encoder_hidden_states=text_embeddings ).sample
        if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
            fac = torch.sqrt(beta_prod_t )
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler , LMSDiscreteScheduler ):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f'scheduler type {type(self.scheduler )} not supported' )
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = transforms.Resize(self.feature_extractor_size )(image )
        image = self.normalize(image ).to(latents.dtype )
        image_embeddings_clip = self.clip_model.get_image_features(image )
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=True )
        loss = spherical_dist_loss(image_embeddings_clip , original_image_embeddings_clip ).mean() * clip_guidance_scale
        # move the latents in the direction that lowers the CLIP distance
        grads = -torch.autograd.grad(loss , latents )[0]
        if isinstance(self.scheduler , LMSDiscreteScheduler ):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t ) * grads
        return noise_pred, latents
@torch.no_grad()
    def __call__(self , style_image , content_image , style_prompt=None , content_prompt=None , height=512 , width=512 , noise_strength=0.6 , num_inference_steps=50 , guidance_scale=7.5 , batch_size=1 , eta=0.0 , clip_guidance_scale=100 , generator=None , output_type="pil" , return_dict=True , slerp_latent_style_strength=0.8 , slerp_prompt_style_strength=0.1 , slerp_clip_image_style_strength=0.1 , ):
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(f'You have passed {batch_size} batch_size, but only {len(generator )} generators.' )
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
        if isinstance(generator , torch.Generator ) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)
        coca_is_none = [
            ("""model""", self.coca_model is None),
            ("""tokenizer""", self.coca_tokenizer is None),
            ("""transform""", self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = """, """.join(coca_is_none )
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none ):
                raise ValueError(
                    f'Content prompt is None and CoCa [{coca_is_none_str}] is None.'
                    f'Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' )
            content_prompt = self.get_image_description(content_image )
        if style_prompt is None:
            if len(coca_is_none ):
                raise ValueError(
                    f'Style prompt is None and CoCa [{coca_is_none_str}] is None.'
                    f' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' )
            style_prompt = self.get_image_description(style_image )
        # get prompt text embeddings for content and style
        content_text_input = self.tokenizer(
            content_prompt , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=True , return_tensors="""pt""" , )
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
        style_text_input = self.tokenizer(
            style_prompt , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=True , return_tensors="""pt""" , )
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
        text_embeddings = slerp(slerp_prompt_style_strength , content_text_embeddings , style_text_embeddings )
        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size , dim=0 )
        # set timesteps
        accepts_offset = """offset""" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["""offset"""] = 1
        self.scheduler.set_timesteps(num_inference_steps , **extra_set_kwargs )
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device )
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps , noise_strength , self.device )
        latent_timestep = timesteps[:1].repeat(batch_size )
        # Preprocess image
        content_image = preprocess(content_image , width , height )
        content_latents = self.prepare_latents(
            content_image , latent_timestep , batch_size , text_embeddings.dtype , self.device , generator )
        style_image = preprocess(style_image , width , height )
        style_latents = self.prepare_latents(
            style_image , latent_timestep , batch_size , text_embeddings.dtype , self.device , generator )
        latents = slerp(slerp_latent_style_strength , content_latents , style_latents )
        if clip_guidance_scale > 0:
            content_clip_image_embedding = self.get_clip_image_embeddings(content_image , batch_size )
            style_clip_image_embedding = self.get_clip_image_embeddings(style_image , batch_size )
            clip_image_embeddings = slerp(
                slerp_clip_image_style_strength , content_clip_image_embedding , style_clip_image_embedding )
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""""""] , padding="""max_length""" , max_length=max_length , return_tensors="""pt""" )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size , dim=0 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings] )
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape , generator=generator , device="""cpu""" , dtype=latents_dtype ).to(
                    self.device )
            else:
                latents = torch.randn(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        else:
            if latents.shape != latents_shape:
                raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
            latents = latents.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["""eta"""] = eta
        # check if the scheduler accepts generator
        accepts_generator = """generator""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        if accepts_generator:
            extra_step_kwargs["""generator"""] = generator
        with self.progress_bar(total=num_inference_steps ):
            for i, t in enumerate(timesteps ):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input , t )
                # predict the noise residual
                noise_pred = self.unet(latent_model_input , t , encoder_hidden_states=text_embeddings ).sample
                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2 )
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred, latents = self.cond_fn(
                        latents , t , i , text_embeddings_for_guidance , noise_pred , clip_image_embeddings , clip_guidance_scale , )
                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred , t , latents , **extra_step_kwargs ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image, None)
        return StableDiffusionPipelineOutput(images=image , nsfw_content_detected=None )
| 82 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class ZeroShotClassificationPipelineTests( unittest.TestCase ):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline(self , model , tokenizer , processor ):
        classifier = ZeroShotClassificationPipeline(
            model=model , tokenizer=tokenizer , candidate_labels=["""polics""", """health"""] )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self , classifier , _ ):
        outputs = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics""" )
        self.assertEqual(outputs , {"""sequence""": ANY(str ), """labels""": [ANY(str )], """scores""": [ANY(float )]} )
        # No kwarg
        outputs = classifier("""Who are you voting for in 2020?""" , ["""politics"""] )
        self.assertEqual(outputs , {"""sequence""": ANY(str ), """labels""": [ANY(str )], """scores""": [ANY(float )]} )
        outputs = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics"""] )
        self.assertEqual(outputs , {"""sequence""": ANY(str ), """labels""": [ANY(str )], """scores""": [ANY(float )]} )
        outputs = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics, public health""" )
        self.assertEqual(
            outputs , {"""sequence""": ANY(str ), """labels""": [ANY(str ), ANY(str )], """scores""": [ANY(float ), ANY(float )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 )
        outputs = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health"""] )
        self.assertEqual(
            outputs , {"""sequence""": ANY(str ), """labels""": [ANY(str ), ANY(str )], """scores""": [ANY(float ), ANY(float )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 )
        outputs = classifier(
            """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""This text is about {}""" )
        self.assertEqual(outputs , {"""sequence""": ANY(str ), """labels""": [ANY(str )], """scores""": [ANY(float )]} )
        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["""I am happy"""] , ["""positive""", """negative"""] )
        self.assertEqual(
            outputs , [
                {"""sequence""": ANY(str ), """labels""": [ANY(str ), ANY(str )], """scores""": [ANY(float ), ANY(float )]}
                for i in range(1 )
            ] , )
        outputs = classifier(["""I am happy""", """I am sad"""] , ["""positive""", """negative"""] )
        self.assertEqual(
            outputs , [
                {"""sequence""": ANY(str ), """labels""": [ANY(str ), ANY(str )], """scores""": [ANY(float ), ANY(float )]}
                for i in range(2 )
            ] , )
        with self.assertRaises(ValueError ):
            classifier("""""" , candidate_labels="""politics""" )
        with self.assertRaises(TypeError ):
            classifier(None , candidate_labels="""politics""" )
        with self.assertRaises(ValueError ):
            classifier("""Who are you voting for in 2020?""" , candidate_labels="""""" )
        with self.assertRaises(TypeError ):
            classifier("""Who are you voting for in 2020?""" , candidate_labels=None )
        with self.assertRaises(ValueError ):
            classifier(
                """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""Not formatting template""" , )
        with self.assertRaises(AttributeError ):
            classifier(
                """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template=None , )
        self.run_entailment_id(classifier )
    def run_entailment_id(self , zero_shot_classifier ):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id
        config.label2id = {"""LABEL_0""": 0, """LABEL_1""": 1, """LABEL_2""": 2}
        self.assertEqual(zero_shot_classifier.entailment_id , -1 )
        config.label2id = {"""entailment""": 0, """neutral""": 1, """contradiction""": 2}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        config.label2id = {"""ENTAIL""": 0, """NON-ENTAIL""": 1}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        config.label2id = {"""ENTAIL""": 2, """NEUTRAL""": 1, """CONTR""": 0}
        self.assertEqual(zero_shot_classifier.entailment_id , 2 )
        config.label2id = original_label2id
        self.assertEqual(original_entailment , zero_shot_classifier.entailment_id )
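    # Resolution rule exercised above: any label whose name starts with "entail"
    # (case-insensitive) wins; generic {"LABEL_0": 0, ...} mappings fall back to
    # entailment_id == -1 (the last logit).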
@require_torch
    def test_truncation(self ) -> None:
        zero_shot_classifier = pipeline(
            """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"""Who are you voting for in 2020?""" * 100, candidate_labels=["""politics""", """public health""", """science"""] )
@require_torch
    def test_small_model_pt(self ) -> None:
        zero_shot_classifier = pipeline(
            """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , )
        outputs = zero_shot_classifier(
"""Who are you voting for in 2020?""", candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
            nested_simplify(outputs ), {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3],
}, )
@require_tf
    def test_small_model_tf(self ) -> None:
        zero_shot_classifier = pipeline(
            """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""tf""" , )
        outputs = zero_shot_classifier(
"""Who are you voting for in 2020?""", candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
            nested_simplify(outputs ), {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3],
}, )
@slow
@require_torch
    def test_large_model_pt(self ) -> None:
        zero_shot_classifier = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""pt""" )
        outputs = zero_shot_classifier(
"""Who are you voting for in 2020?""", candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
            nested_simplify(outputs ), {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9],
}, )
        outputs = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""", candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""], multi_label=SCREAMING_SNAKE_CASE_, )
self.assertEqual(
            nested_simplify(outputs ), {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
}, )
@slow
@require_tf
    def test_large_model_tf(self ) -> None:
        zero_shot_classifier = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""tf""" )
        outputs = zero_shot_classifier(
"""Who are you voting for in 2020?""", candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
            nested_simplify(outputs ), {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9],
}, )
        outputs = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""", candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""], multi_label=SCREAMING_SNAKE_CASE_, )
self.assertEqual(
            nested_simplify(outputs ), {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
}, )
| 82 | 1 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments )
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args )
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = """Arg --no_{0} is no longer used, please use --no-{0} instead."""
        begin_error_msg = """ """.join(str(e ).split(' ' )[:-1] )
        full_error_msg = """"""
        depreciated_args = eval(str(e ).split(' ' )[-1] )
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(arg )
        if len(wrong_args ) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args )
            raise ValueError(full_error_msg )
    benchmark.run()
if __name__ == "__main__":
main()
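# Example invocation (flag names assumed from TensorFlowBenchmarkArguments,
# illustrative only):
#   python run_benchmark_tf.py --models bert-base-cased --batch_sizes 8 --sequence_lengths 128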
| 119 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
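# Sketch (assumed names) of the accumulation pattern that
# `gradient_accumulation_steps` configures; the training loop later in this
# script uses the same idiom:
#   with accelerator.accumulate(model):
#       outputs = model(**batch)
#       accelerator.backward(outputs.loss)
#       optimizer.step()
#       optimizer.zero_grad()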
def get_dataloaders(accelerator: Accelerator , batch_size: int = 16 ):
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    datasets = load_dataset("""glue""" , """mrpc""" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="""longest""" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="""pt""" , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
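
# For reference, a minimal sketch (not part of the original script) of what gradient
# accumulation does without the `accelerator.accumulate` helper; the names are illustrative.
def _manual_accumulation_step(model, batch, optimizer, accumulation_steps, step):
    loss = model(**batch).loss / accumulation_steps  # scale so accumulated grads average out
    loss.backward()
    if (step + 1) % accumulation_steps == 0:  # update weights only every N micro-batches
        optimizer.step()
        optimizer.zero_grad()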
if __name__ == "__main__":
main()
| 242 | 0 |
from __future__ import annotations
arr: list[float] = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """Get the Next Greatest Element (NGE) for each element by scanning every
    element to its right: O(n^2) time."""
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Like next_greatest_element_slow(), but iterating the tail with a slice."""
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """Get the NGE for each element in O(n) time using a decreasing stack,
    scanning the array from right to left."""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        " next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
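
# Complexity note: both loop-based versions above are O(n^2) in the worst case, while the
# stack-based next_greatest_element() is O(n), since each element is pushed onto and popped
# from the stack at most once.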
| 204 |
import os
from argparse import ArgumentParser
from typing import List

import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node


NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")


if __name__ == "__main__":
    main()
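
# Launch sketch (illustrative, not part of the original test): this script is meant to be
# started once per process by a distributed launcher that sets RANK and WORLD_SIZE, e.g.:
#   torchrun --nproc_per_node=2 this_script.py --streaming True --num_workers 2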
| 204 | 1 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
UpperCAmelCase : List[str] = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
UpperCAmelCase : List[Any] = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
UpperCAmelCase : Optional[int] = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}

    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores
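
# Note: the "conll_score" above is the arithmetic mean of the MUC, B-cubed and CEAFe F1
# values scaled to 0-100, and is only reported when all three sub-metrics were computed.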
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    """CoVal metric for coreference evaluation."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
| 252 |
'''simple docstring'''
def twos_complement(number: int) -> str:
    """Take in a negative integer and return its two's complement representation
    as a string prefixed with '0b'."""
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 311 | 0 |
class PrefixSum:
    """Prefix-sum (cumulative sum) table over an integer array."""

    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Sum of array[start..end] (inclusive) in O(1)."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Return True if some contiguous subarray sums to target_sum."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
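
# Illustrative usage: PrefixSum([1, 2, 3, 4]) stores [1, 3, 6, 10], so get_sum(1, 3) == 9
# (2 + 3 + 4) and contains_sum(7) is True (subarray [3, 4]).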
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 329 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def replace_key_with_offset(key, offset, original_name, new_name):
    """Replaces the key by subtracting the offset from the original layer number."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
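
# Worked example (illustrative key): with offset == 1, the key "2.3.mlp.fc1.weight"
# has orig_block_num == 2 and layer_num == 3, so calling this helper with
# ("mlp.fc1", "output.conv1") rewrites it to "block.1.3.output.conv1.weight".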
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict


# We will verify our results on a COCO image
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original model's weights into our PoolFormer structure."""
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="poolformer_s12",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )

    args = parser.parse_args()
    convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 329 | 1 |
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0
class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 301 |
"""simple docstring"""
def solution(limit: int = 1000000) -> int:
    """Sieve Euler's totient phi(n) for 2 <= n <= limit and return the sum."""
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime, so apply the factor (1 - 1/i) to its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])
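
# Sanity check (hand-verified): phi(2..10) = 1, 2, 2, 4, 2, 6, 4, 6, 4,
# so solution(10) == 31.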
if __name__ == "__main__":
print(solution())
| 301 | 1 |
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session")
def dataset():
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            "id": datasets.Value("int64"),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        },
        features=features,
    )
    return dataset


@pytest.fixture(scope="session")
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename


# FILE_CONTENT + files

FILE_CONTENT = """\
    Text data.
    Second line of data."""


@pytest.fixture(scope="session")
def text_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename, "w") as f:
        f.write(data)
    return filename


@pytest.fixture(scope="session")
def bz2_file(tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def gz_file(tmp_path_factory):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    data = bytes(FILE_CONTENT, "utf-8")
    with gzip.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def lz4_file(tmp_path_factory):
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        data = bytes(FILE_CONTENT, "utf-8")
        with lz4.frame.open(path, "wb") as f:
            f.write(data)
        return path


@pytest.fixture(scope="session")
def seven_zip_file(tmp_path_factory, text_file):
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

        path = tmp_path_factory.mktemp("data") / "file.txt.7z"
        with py7zr.SevenZipFile(path, "w") as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
        return path


@pytest.fixture(scope="session")
def tar_file(tmp_path_factory, text_file):
    import tarfile

    path = tmp_path_factory.mktemp("data") / "file.txt.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def xz_file(tmp_path_factory):
    import lzma

    path = tmp_path_factory.mktemp("data") / "file.txt.xz"
    data = bytes(FILE_CONTENT, "utf-8")
    with lzma.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_file(tmp_path_factory, text_file):
    import zipfile

    path = tmp_path_factory.mktemp("data") / "file.txt.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def zstd_file(tmp_path_factory):
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        path = tmp_path_factory.mktemp("data") / "file.txt.zst"
        data = bytes(FILE_CONTENT, "utf-8")
        with zstd.open(path, "wb") as f:
            f.write(data)
        return path
@pytest.fixture(scope="session")
def xml_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.xml"
    data = textwrap.dedent(
        """\
    <?xml version="1.0" encoding="UTF-8" ?>
    <tmx version="1.4">
      <header segtype="sentence" srclang="ca" />
      <body>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
          <tuv xml:lang="en"><seg>Content 1</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
          <tuv xml:lang="en"><seg>Content 2</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
          <tuv xml:lang="en"><seg>Content 3</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
          <tuv xml:lang="en"><seg>Content 4</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
          <tuv xml:lang="en"><seg>Content 5</seg></tuv>
        </tu>
      </body>
    </tmx>"""
    )
    with open(filename, "w") as f:
        f.write(data)
    return filename
DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope="session")
def dataset_dict():
    return DATA_DICT_OF_LISTS


@pytest.fixture(scope="session")
def arrow_path(tmp_path_factory):
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
    path = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
    dataset.map(cache_file_name=path)
    return path


@pytest.fixture(scope="session")
def sqlite_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path


@pytest.fixture(scope="session")
def csv_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope="session")
def csv2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope="session")
def bz2_csv_path(csv_path, tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "dataset.csv.bz2"
    with open(csv_path, "rb") as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path))
        f.write(csv2_path, arcname=os.path.basename(csv2_path))
    return path


@pytest.fixture(scope="session")
def zip_uppercase_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path.replace(".csv", ".CSV")))
        f.write(csv2_path, arcname=os.path.basename(csv2_path.replace(".csv", ".CSV")))
    return path


@pytest.fixture(scope="session")
def zip_csv_with_dir_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.join("main_dir", os.path.basename(csv_path)))
        f.write(csv2_path, arcname=os.path.join("main_dir", os.path.basename(csv2_path)))
    return path
@pytest.fixture(scope="session")
def parquet_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        }
    )
    with open(path, "wb") as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path


@pytest.fixture(scope="session")
def json_list_of_dicts_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def json_dict_of_lists_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA_DICT_OF_LISTS}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def jsonl_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_312_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl")
    with open(path, "w") as f:
        for item in DATA_312:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_str_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl")
    with open(path, "w") as f:
        for item in DATA_STR:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def text_gz_path(tmp_path_factory, text_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz")
    with open(text_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def jsonl_gz_path(tmp_path_factory, jsonl_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
    with open(jsonl_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(zip_jsonl_path, arcname=os.path.join("nested", os.path.basename(zip_jsonl_path)))
    return path


@pytest.fixture(scope="session")
def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path)))
        f.write(jsonl2_path, arcname=os.path.join("main_dir", os.path.basename(jsonl2_path)))
    return path


@pytest.fixture(scope="session")
def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path)))
    return path


@pytest.fixture(scope="session")
def text_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def text2_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def text_path_with_unsupported_extension(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = tmp_path_factory.mktemp("data") / "dataset.abc"
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def zip_text_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(text2_path, arcname=os.path.basename(text2_path))
    return path


@pytest.fixture(scope="session")
def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path)))
        f.write(text2_path, arcname=os.path.join("main_dir", os.path.basename(text2_path)))
    return path


@pytest.fixture(scope="session")
def zip_unsupported_ext_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.ext.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename("unsupported.ext"))
        f.write(text2_path, arcname=os.path.basename("unsupported_2.ext"))
    return path


@pytest.fixture(scope="session")
def text_path_with_unicode_new_lines(tmp_path_factory):
    text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
    path = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write(text)
    return path


@pytest.fixture(scope="session")
def image_file():
    return os.path.join("tests", "features", "data", "test_image_rgb.jpg")


@pytest.fixture(scope="session")
def audio_file():
    return os.path.join("tests", "features", "data", "test_audio_44100.wav")


@pytest.fixture(scope="session")
def zip_image_path(image_file, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.img.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg"))
    return path


@pytest.fixture(scope="session")
def data_dir_with_hidden_files(tmp_path_factory):
    data_dir = tmp_path_factory.mktemp("data_dir")

    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / "subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden file
    with open(data_dir / "subdir" / ".test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / ".subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)

    return data_dir
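
# Usage sketch (illustrative, not part of the original conftest): pytest injects these
# fixtures into tests by parameter name, e.g.:
#   def test_csv_loading(csv_path):
#       ds = datasets.load_dataset("csv", data_files=csv_path)
#       assert ds["train"].num_rows == 4  # DATA above has four rows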
| 352 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 69 | 0 |
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            label_length=self.label_length,
            lags_sequence=self.lags_sequence,
            num_time_features=self.num_time_features,
            num_static_categorical_features=1,
            cardinality=[self.cardinality],
            embedding_dimension=[self.embedding_dimension],
            moving_average=self.moving_average,
        )

    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
__UpperCamelCase = (AutoformerForPrediction,) if is_torch_available() else ()
__UpperCamelCase = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ : List[str] = AutoformerModelTester(self )
A_ : Union[str, Any] = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case )
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ , A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
A_ : List[Any] = model_class(snake_case )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case )
A_ , A_ : Any = model_class.from_pretrained(snake_case , output_loading_info=snake_case )
self.assertEqual(info["missing_keys"] , [] )
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*snake_case )
@unittest.skip(reason="Model has no tokens embeddings" )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ : Optional[int] = inspect.signature(getattr(snake_case , "forward" ) )
# The main input is the name of the argument after `self`
A_ : Dict = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , snake_case )
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
A_ , A_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Union[str, Any] = model_class(snake_case )
A_ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : List[Any] = [*signature.parameters.keys()]
A_ : Optional[int] = [
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask" )
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
] )
self.assertListEqual(arg_names[: len(snake_case )] , snake_case )
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
A_ , A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Dict = True
A_ : Any = getattr(self.model_tester , "seq_length" , snake_case )
A_ : List[str] = getattr(self.model_tester , "decoder_seq_length" , snake_case )
A_ : str = getattr(self.model_tester , "encoder_seq_length" , snake_case )
A_ : List[Any] = getattr(self.model_tester , "d_model" , snake_case )
A_ : str = getattr(self.model_tester , "num_attention_heads" , snake_case )
A_ : Dict = d_model // num_attention_heads
for model_class in self.all_model_classes:
A_ : Optional[int] = True
A_ : List[Any] = False
A_ : Union[str, Any] = True
A_ : Optional[int] = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
A_ : Union[str, Any] = model(**self._prepare_for_class(snake_case , snake_case ) )
A_ : str = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A_ : str = True
A_ : Dict = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
A_ : Optional[Any] = model(**self._prepare_for_class(snake_case , snake_case ) )
A_ : int = outputs.encoder_attentions
self.assertEqual(len(snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
A_ : Any = len(snake_case )
A_ : Dict = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(snake_case , snake_case )
# decoder attentions
A_ : Dict = outputs.decoder_attentions
self.assertIsInstance(snake_case , (list, tuple) )
self.assertEqual(len(snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
A_ : Optional[Any] = outputs.cross_attentions
self.assertIsInstance(snake_case , (list, tuple) )
self.assertEqual(len(snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
A_ : Tuple = True
A_ : Union[str, Any] = True
A_ : str = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
A_ : Optional[int] = model(**self._prepare_for_class(snake_case , snake_case ) )
self.assertEqual(out_len + 2 , len(snake_case ) )
A_ : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
super().test_retain_grad_hidden_states_attentions()
def __snake_case ( _lowerCAmelCase : Any="train-batch.pt" ) -> List[str]:
A_ : Optional[int] = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=_lowerCAmelCase , repo_type="dataset" )
A_ : Optional[int] = torch.load(_lowerCAmelCase , map_location=_lowerCAmelCase )
return batch
@require_torch
@slow
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : Any = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case )
A_ : str = prepare_batch()
with torch.no_grad():
A_ : Dict = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
A_ : Dict = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , snake_case )
A_ : int = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=snake_case )
self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case , atol=snake_case ) )
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
A_ : Optional[int] = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case )
A_ : Optional[Any] = prepare_batch("val-batch.pt" )
with torch.no_grad():
A_ : Optional[Any] = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
A_ : int = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , snake_case )
A_ : Union[str, Any] = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=snake_case )
self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case , atol=snake_case ) )
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
A_ : List[Any] = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case )
A_ : Any = prepare_batch("val-batch.pt" )
with torch.no_grad():
A_ : int = model.generate(
static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
A_ : List[str] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , snake_case )
A_ : Optional[int] = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=snake_case )
A_ : Dict = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , snake_case , rtol=1e-1 ) )
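
# --- Illustrative sketch (not part of the test suite): the same forecasting flow as
# `test_seq_to_seq_generation` above, written as standalone usage. The checkpoint and dataset ids
# are the ones used in the tests; it is shown as comments because this module relies on relative
# imports and is meant to be run via pytest, not directly.
#
#   import torch
#   from huggingface_hub import hf_hub_download
#   from transformers import AutoformerForPrediction
#
#   file = hf_hub_download(
#       repo_id="hf-internal-testing/tourism-monthly-batch", filename="val-batch.pt", repo_type="dataset"
#   )
#   batch = torch.load(file, map_location="cpu")
#   model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly")
#   with torch.no_grad():
#       outputs = model.generate(
#           static_categorical_features=batch["static_categorical_features"],
#           past_time_features=batch["past_time_features"],
#           past_values=batch["past_values"],
#           future_time_features=batch["future_time_features"],
#           past_observed_mask=batch["past_observed_mask"],
#       )
#   point_forecast = outputs.sequences.mean(dim=1)  # average the parallel samples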
| 300 |
import inspect
from typing import Callable, List, Optional, Union

import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    """
    Pipeline for text-to-image generation using Stable Diffusion that keeps images generated from
    the same seed similar across different output sizes.
    """

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        # set slice_size = `None` to disable attention slicing
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        text_embeddings: Optional[torch.FloatTensor] = None,
        **kwargs,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
                ).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
                )
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents_reference.shape}, expected {latents_shape}")
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
            )
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
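
if __name__ == "__main__":
    # Hedged usage sketch: the checkpoint id is an assumption; any Stable Diffusion v1 checkpoint
    # exposing the seven modules registered in `__init__` should work. The point of this pipeline
    # is that changing `height`/`width` keeps the composition tied to the seed through the 64x64
    # reference latents.
    base = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")  # assumed checkpoint
    pipe = SeedResizeStableDiffusionPipeline(**base.components)
    generator = torch.Generator().manual_seed(0)
    image = pipe("a photo of an astronaut riding a horse", height=512, width=768, generator=generator).images[0]
    image.save("astronaut_512x768.png")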
| 300 | 1 |
"""simple docstring"""
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case ( _UpperCamelCase):
__UpperCamelCase = (CMStochasticIterativeScheduler,)
__UpperCamelCase = 10
def a_ ( self : Optional[Any] , **a__ : Tuple ) -> Dict:
'''simple docstring'''
_A = {
"num_train_timesteps": 2_01,
"sigma_min": 0.0_0_2,
"sigma_max": 8_0.0,
}
config.update(**a__ )
return config
def a_ ( self : List[Any] ) -> Dict:
'''simple docstring'''
_A = 10
_A = self.get_scheduler_config()
_A = self.scheduler_classes[0](**a__ )
scheduler.set_timesteps(a__ )
_A = scheduler.timesteps[0]
_A = scheduler.timesteps[1]
_A = self.dummy_sample
_A = 0.1 * sample
_A = scheduler.step(a__ , a__ , a__ ).prev_sample
_A = scheduler.step(a__ , a__ , a__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def a_ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=a__ )
def a_ ( self : int ) -> str:
'''simple docstring'''
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=a__ )
def a_ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**a__ )
_A = 1
scheduler.set_timesteps(a__ )
_A = scheduler.timesteps
_A = torch.manual_seed(0 )
_A = self.dummy_model()
_A = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(a__ ):
# 1. scale model input
_A = scheduler.scale_model_input(a__ , a__ )
# 2. predict noise residual
_A = model(a__ , a__ )
# 3. predict previous sample x_t-1
_A = scheduler.step(a__ , a__ , a__ , generator=a__ ).prev_sample
_A = pred_prev_sample
_A = torch.sum(torch.abs(a__ ) )
_A = torch.mean(torch.abs(a__ ) )
assert abs(result_sum.item() - 1_9_2.7_6_1_4 ) < 1E-2
assert abs(result_mean.item() - 0.2_5_1_0 ) < 1E-3
def a_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**a__ )
_A = [1_06, 0]
scheduler.set_timesteps(timesteps=a__ )
_A = scheduler.timesteps
_A = torch.manual_seed(0 )
_A = self.dummy_model()
_A = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
_A = scheduler.scale_model_input(a__ , a__ )
# 2. predict noise residual
_A = model(a__ , a__ )
# 3. predict previous sample x_t-1
_A = scheduler.step(a__ , a__ , a__ , generator=a__ ).prev_sample
_A = pred_prev_sample
_A = torch.sum(torch.abs(a__ ) )
_A = torch.mean(torch.abs(a__ ) )
assert abs(result_sum.item() - 3_4_7.6_3_5_7 ) < 1E-2
assert abs(result_mean.item() - 0.4_5_2_7 ) < 1E-3
def a_ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**a__ )
_A = [39, 30, 12, 15, 0]
with self.assertRaises(a__ , msg="`timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=a__ )
def a_ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**a__ )
_A = [39, 30, 12, 1, 0]
_A = len(a__ )
with self.assertRaises(a__ , msg="Can only pass one of `num_inference_steps` or `timesteps`." ):
scheduler.set_timesteps(num_inference_steps=a__ , timesteps=a__ )
def a_ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**a__ )
_A = [scheduler.config.num_train_timesteps]
with self.assertRaises(
a__ , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ):
scheduler.set_timesteps(timesteps=a__ ) | 163 |
"""simple docstring"""
def a__ ( __lowercase ) -> int:
assert (
isinstance(__lowercase , __lowercase ) and number_of_steps > 0
), f"""number_of_steps needs to be positive integer, your input {number_of_steps}"""
if number_of_steps == 1:
return 1
_A , _A = 1, 1
for _ in range(number_of_steps - 1 ):
_A , _A = current + previous, current
return current
if __name__ == "__main__":
import doctest
doctest.testmod() | 163 | 1 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase = "cpu" , __UpperCamelCase = None ) -> None:
"""simple docstring"""
lowerCAmelCase_ : List[Any] = torch.load(__UpperCamelCase , map_location=__UpperCamelCase )
for k, v in tqdm(state_dict.items() ):
if not isinstance(__UpperCamelCase , torch.Tensor ):
raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin" )
lowerCAmelCase_ : int = v.half()
if save_path is None: # overwrite src_path
lowerCAmelCase_ : Tuple = src_path
torch.save(__UpperCamelCase , __UpperCamelCase )
if __name__ == "__main__":
fire.Fire(convert)
| 241 |
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Dict:
"""simple docstring"""
lowerCAmelCase_ : Union[str, Any] = 0
if start < end:
lowerCAmelCase_ : Dict = randint(__UpperCamelCase , __UpperCamelCase )
lowerCAmelCase_ : List[str] = a[end]
lowerCAmelCase_ : List[str] = a[pivot]
lowerCAmelCase_ : Any = temp
lowerCAmelCase_ , lowerCAmelCase_ : Any = _in_place_partition(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
count += _in_place_quick_sort(__UpperCamelCase , __UpperCamelCase , p - 1 )
count += _in_place_quick_sort(__UpperCamelCase , p + 1 , __UpperCamelCase )
return count
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> str:
"""simple docstring"""
lowerCAmelCase_ : Optional[int] = 0
lowerCAmelCase_ : Tuple = randint(__UpperCamelCase , __UpperCamelCase )
lowerCAmelCase_ : str = a[end]
lowerCAmelCase_ : List[Any] = a[pivot]
lowerCAmelCase_ : Optional[Any] = temp
lowerCAmelCase_ : Dict = start - 1
for index in range(__UpperCamelCase , __UpperCamelCase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
lowerCAmelCase_ : Dict = new_pivot_index + 1
lowerCAmelCase_ : Tuple = a[new_pivot_index]
lowerCAmelCase_ : List[Any] = a[index]
lowerCAmelCase_ : Optional[Any] = temp
lowerCAmelCase_ : Any = a[new_pivot_index + 1]
lowerCAmelCase_ : int = a[end]
lowerCAmelCase_ : str = temp
return new_pivot_index + 1, count
lowercase__ = TemporaryFile()
lowercase__ = 100 # 1000 elements are to be sorted
lowercase__ , lowercase__ = 0, 1 # mean and standard deviation
lowercase__ = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("""The array is""")
print(X)
outfile.seek(0) # using the same array
lowercase__ = np.load(outfile)
lowercase__ = len(M) - 1
lowercase__ = _in_place_quick_sort(M, 0, r)
print(
"""No of Comparisons for 100 elements selected from a standard normal distribution"""
"""is :"""
)
print(z)
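
# --- Small follow-up check (illustrative, appended for demonstration): the array must come out
# sorted, and for n = 100 a randomized quicksort performs on the order of n*log2(n) ~ 700
# comparisons on average.
assert all(M[i] <= M[i + 1] for i in range(len(M) - 1)), "quick sort failed"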
| 241 | 1 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    """
    Output class for the scheduler's `step` function output.
    """

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function, which defines the
    cumulative product of (1-beta) over time from t = [0,1].
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    """
    `DDIMInverseScheduler` is the reverse scheduler of `DDIMScheduler`: it maps samples from the
    data distribution back toward noise.
    """

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))

    # Copied from diffusers.schedulers.scheduling_ddim.DDIMScheduler.scale_model_input
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get the next timestep value (in inversion, time runs forward)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )

        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
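
if __name__ == "__main__":
    # Hedged sketch (not part of the scheduler above): inversion runs the usual denoising-loop
    # shape but walks t upward, mapping a clean latent toward noise. The "model output" here is a
    # zero-tensor stand-in; in practice it would come from a UNet conditioned on the original
    # prompt, e.g. `unet(sample, t, encoder_hidden_states=...).sample`.
    scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(50)

    sample = torch.randn(1, 4, 64, 64)  # stand-in for a clean VAE latent
    for t in scheduler.timesteps:
        model_output = torch.zeros_like(sample)  # stand-in for the noise prediction
        sample = scheduler.step(model_output, int(t), sample).prev_sample
    print(sample.shape)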
| 260 |
from __future__ import annotations

solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """
    Returns True if it is safe to place a queen at board[row][column];
    checks the row, the column, and both upper diagonals.
    """
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """
    Places queens row by row, backtracking when no column in the current row is safe.
    """
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    """
    Prints the board with 'Q' for queens and '.' for empty squares.
    """
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n = int(input("The no. of queens: "))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| 260 | 1 |
from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list:
    max_face_number = sides_number
    max_total_value = max_face_number * dice_number
    totals_frequencies = [0] * (max_total_value + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    """
    Returns the probability that Peter wins, rounded to seven decimal places.
    """
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability


if __name__ == "__main__":
    print(f"{solution() = }")
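
    # Worked numbers (illustrative): Peter rolls nine 4-sided dice and Colin six 6-sided dice, so
    # there are 4**9 * 6**6 = 262144 * 46656 = 12,230,590,464 equally likely games; the widely
    # published answer to Project Euler 205, 0.5731441, is Peter's winning-game count over that total.
    print(f"total games: {4**9 * 6**6}")  # 12230590464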
| 226 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
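
# --- Hedged usage sketch (comment-only; not part of this __init__). Kandinsky is a two-stage
# system: the prior pipeline maps a prompt to CLIP image embeddings, which the main pipeline
# decodes into an image. The checkpoint ids below are assumptions for illustration.
#
#   from diffusers import KandinskyPipeline, KandinskyPriorPipeline
#
#   prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
#   pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
#
#   image_embeds, negative_image_embeds = prior("a red cat").to_tuple()
#   image = pipe(
#       "a red cat",
#       image_embeds=image_embeds,
#       negative_image_embeds=negative_image_embeds,
#   ).images[0]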
| 226 | 1 |
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


logger = logging.get_logger(__name__)


if is_vision_available():
    import PIL


class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
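
# --- Hedged usage sketch (comment-only, since this module uses relative imports):
# `BaseImageProcessor.__call__` routes to `preprocess`, so with the defaults a PIL image comes
# back resized, center-cropped, rescaled, and normalized as a (1, 3, 224, 224) array or tensor.
#
#   from transformers import CLIPImageProcessor  # assumed public entry point
#
#   processor = CLIPImageProcessor()
#   pixel_values = processor(images=pil_image, return_tensors="np").pixel_values
#   print(pixel_values.shape)  # (1, 3, 224, 224)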
| 311 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 311 | 1 |
"""Convert BART checkpoint."""


import argparse
import os
from pathlib import Path

import fairseq
import torch
from packaging import version
from torch import nn

from transformers import (
    BartConfig,
    BartForConditionalGeneration,
    BartForSequenceClassification,
    BartModel,
    BartTokenizer,
)
from transformers.utils import logging


FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
    raise Exception("requires fairseq >= 0.9.0")


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
    ("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
    ("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
    ("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
    ("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """
    Copy/paste/tweak model's weights to our BART structure.
    """
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config", default=None, type=str, help="Which huggingface architecture to use: bart-large-xsum"
    )
    args = parser.parse_args()
    convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
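
# --- Example invocation (illustrative; the script filename is an assumption):
#
#   python convert_bart_original_pytorch_checkpoint_to_pytorch.py \
#       bart.large.cnn ./bart-large-cnn --hf_config facebook/bart-large-cnn
#
# `fairseq_path` may be a fairseq hub name (downloaded via torch.hub) or a local model.pt path.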
| 310 |
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __lowerCamelCase :
@staticmethod
def snake_case_ ( *A_: Optional[Any],**A_: Tuple ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __lowerCamelCase (unittest.TestCase ):
_lowercase = MODEL_FOR_OBJECT_DETECTION_MAPPING
def snake_case_ ( self: Dict,A_: Optional[int],A_: Tuple,A_: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = ObjectDetectionPipeline(model=A_,image_processor=A_ )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def snake_case_ ( self: int,A_: Any,A_: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png',threshold=0.0 )
self.assertGreater(len(A_ ),0 )
for detected_object in outputs:
self.assertEqual(
A_,{
'score': ANY(A_ ),
'label': ANY(A_ ),
'box': {'xmin': ANY(A_ ), 'ymin': ANY(A_ ), 'xmax': ANY(A_ ), 'ymax': ANY(A_ )},
},)
import datasets
__UpperCamelCase = datasets.load_dataset('hf-internal-testing/fixtures_image_utils','image',split='test' )
__UpperCamelCase = [
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
]
__UpperCamelCase = object_detector(A_,threshold=0.0 )
self.assertEqual(len(A_ ),len(A_ ) )
for outputs in batch_outputs:
self.assertGreater(len(A_ ),0 )
for detected_object in outputs:
self.assertEqual(
A_,{
'score': ANY(A_ ),
'label': ANY(A_ ),
'box': {'xmin': ANY(A_ ), 'ymin': ANY(A_ ), 'xmax': ANY(A_ ), 'ymax': ANY(A_ )},
},)
    @require_tf
    @unittest.skip('Object detection not implemented in TF')
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        model_id = 'hf-internal-testing/tiny-detr-mobilenetsv3'

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg', threshold=0.0)

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
                {'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
            ],
        )

        outputs = object_detector(
            [
                'http://images.cocodataset.org/val2017/000000039769.jpg',
                'http://images.cocodataset.org/val2017/000000039769.jpg',
            ],
            threshold=0.0,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
                    {'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
                ],
                [
                    {'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
                    {'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
                ],
            ],
        )
    @require_torch
    @slow
    def test_large_model_pt(self):
        model_id = 'facebook/detr-resnet-50'

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg')
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
                {'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
                {'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
                {'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
                {'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
            ],
        )

        outputs = object_detector(
            [
                'http://images.cocodataset.org/val2017/000000039769.jpg',
                'http://images.cocodataset.org/val2017/000000039769.jpg',
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
                    {'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
                    {'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
                    {'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
                    {'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
                ],
                [
                    {'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
                    {'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
                    {'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
                    {'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
                    {'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
                ],
            ],
        )
    @require_torch
    @slow
    def test_integration_torch_object_detection(self):
        model_id = 'facebook/detr-resnet-50'

        object_detector = pipeline('object-detection', model=model_id)

        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg')
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
                {'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
                {'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
                {'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
                {'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
            ],
        )

        outputs = object_detector(
            [
                'http://images.cocodataset.org/val2017/000000039769.jpg',
                'http://images.cocodataset.org/val2017/000000039769.jpg',
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
                    {'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
                    {'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
                    {'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
                    {'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
                ],
                [
                    {'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
                    {'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
                    {'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
                    {'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
                    {'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
                ],
            ],
        )
    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = 'facebook/detr-resnet-50'

        object_detector = pipeline('object-detection', model=model_id)

        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg', threshold=threshold)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
                {'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
            ],
        )
    @require_torch
    @require_pytesseract
    @slow
    def test_layoutlm(self):
        model_id = 'Narsil/layoutlmv3-finetuned-funsd'
        threshold = 0.9993

        object_detector = pipeline('object-detection', model=model_id, threshold=threshold)

        outputs = object_detector(
            'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png'
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {'score': 0.9993, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
                {'score': 0.9993, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
            ],
        )
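# Outside the test-suite the same pipeline can be driven directly; a minimal sketch
# reusing a model id and image URL that already appear in the tests above:
#   from transformers import pipeline
#   detector = pipeline('object-detection', model='facebook/detr-resnet-50')
#   detector('http://images.cocodataset.org/val2017/000000039769.jpg')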
| 310 | 1 |
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    """Unconditional image generation with a DDIM-scheduled UNet in the latent space of a VQ-VAE."""

    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        # sample the initial latents in the UNet's native shape
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQ-VAE
        image = self.vqvae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
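# Minimal usage sketch for the pipeline above; "CompVis/ldm-celebahq-256" is assumed
# to be a checkpoint id with compatible vqvae/unet/scheduler weights:
#   pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#   images = pipe(batch_size=1, num_inference_steps=50).images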
| 367 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Export a PyTorch BertModel state dict as a TensorFlow 1.x checkpoint."""
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        # apply every PyTorch -> TF rename rule in order
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            # TF stores linear/attention kernels transposed relative to PyTorch
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))
def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
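# Usage sketch (hypothetical script name and local paths; the flags are those
# registered in main()):
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_checkpoint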
| 7 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"
class FNetTokenizerFast(PreTrainedTokenizerFast):
    """Fast FNet tokenizer backed by a SentencePiece vocabulary."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text, so there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
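# Usage sketch ("google/fnet-base" comes from the pretrained maps above):
#   tokenizer = FNetTokenizerFast.from_pretrained("google/fnet-base")
#   tokenizer("Hello world")["input_ids"]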
| 54 |
"""simple docstring"""
def catalan_numbers(upper_limit: int) -> "list[int]":
    """Return the Catalan numbers C(0) .. C(upper_limit) via dynamic programming."""
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list
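# Sanity check: the first six Catalan numbers are 1, 1, 2, 5, 14, 42.
#   assert catalan_numbers(5) == [1, 1, 2, 5, 14, 42]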
if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
| 54 | 1 |
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)
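# e.g. factorial(5) == 120; lru_cache memoizes each intermediate result, so
# repeated calls reuse earlier computations:
#   assert factorial(5) == 120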
if __name__ == "__main__":
import doctest
doctest.testmod()
| 207 |
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False


logger = logging.get_logger("transformers-cli/serving")
def serve_command_factory(args: Namespace):
    """
    Factory function used to instantiate the serving server from the provided command line arguments.
    """
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
    """
    Expose model information
    """

    infos: dict


class ServeTokenizeResult(BaseModel):
    """
    Tokenize result model
    """

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """
    DeTokenize result model
    """

    text: str


class ServeForwardResult(BaseModel):
    """
    Forward result model
    """

    output: Any
class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the transformers-cli.
        """
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task", type=str, choices=get_supported_tasks(), help="The task to run the pipeline on"
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)

    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline

        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]".'
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/",
                        self.model_info,
                        response_model=ServeModelInfoResult,
                        response_class=JSONResponse,
                        methods=["GET"],
                    ),
                    APIRoute(
                        "/tokenize",
                        self.tokenize,
                        response_model=ServeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/detokenize",
                        self.detokenize,
                        response_model=ServeDeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/forward",
                        self.forward,
                        response_model=ServeForwardResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                ],
                timeout=600,
            )
    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        """
        Tokenize the provided input and eventually return the corresponding token ids.
        """
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)

        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        """
        Detokenize the provided token ids back to readable text.
        """
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
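# Usage sketch (the model id is only an example; the flags are those registered in
# register_subcommand above):
#   transformers-cli serve --task text-classification \
#       --model distilbert-base-uncased-finetuned-sst-2-english --port 8888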
| 207 | 1 |