code
stringlengths 82
53.2k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
"""simple docstring"""
from __future__ import annotations
import os
from typing import Any
import requests
__UpperCamelCase : Dict = '''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
__UpperCamelCase : List[str] = BASE_URL + '''/user'''
# https://github.com/settings/tokens
__UpperCamelCase : Optional[int] = os.environ.get('''USER_TOKEN''', '''''')
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ):
lowerCAmelCase = {
'Authorization': F'token {auth_token}',
'Accept': 'application/vnd.github.v3+json',
}
return requests.get(_UpperCAmelCase , headers=_UpperCAmelCase ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'''{key}: {value}''')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
| 4
|
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class __a ( unittest.TestCase ):
def A ( self : List[Any] ):
lowerCAmelCase_ : Tuple = tf.convert_to_tensor(
[
[
8.222_0991, # 3rd highest value; idx. 0
-0.562_0044,
5.2322_9752,
4.038_6393,
-6.879_8378,
-0.5478_5802,
-3.201_2153,
2.9277_7176,
1.8817_1953,
7.3534_1276, # 5th highest value; idx. 9
8.4320_7833, # 2nd highest value; idx. 10
-9.8571_1836,
-5.9620_9236,
-1.1303_9161,
-7.111_5294,
-0.836_9633,
-5.318_6408,
7.0642_7407,
0.8136_9344,
-0.8202_3817,
-5.917_9796,
0.5881_3443,
-6.9977_8438,
4.7155_1189,
-0.1877_1637,
7.4402_0759, # 4th highest value; idx. 25
9.3845_0987, # 1st highest value; idx. 26
2.1266_2941,
-9.3256_2038,
2.3565_2522,
], # cummulative prob of 5 highest values <= 0.6
[
0.5842_5518,
4.5313_9238,
-5.5751_0464,
-6.2803_0699,
-7.1952_9503,
-4.0212_2551,
1.3933_7037,
-6.0670_7057,
1.5948_0517,
-9.64_3119,
0.0390_7799,
0.6723_1762,
-8.8820_6726,
6.2711_5922, # 4th highest value; idx. 13
2.2852_0723,
4.8276_7506,
4.3042_1368,
8.827_5313, # 2nd highest value; idx. 17
5.4402_9958, # 5th highest value; idx. 18
-4.473_5794,
7.3857_9536, # 3rd highest value; idx. 20
-2.9105_1663,
2.6194_6077,
-2.567_4762,
-9.4895_9302,
-4.0292_2645,
-1.3541_6918,
9.6770_2323, # 1st highest value; idx. 27
-5.8947_8553,
1.8537_0467,
], # cummulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
lowerCAmelCase_ : Optional[Any] = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
lowerCAmelCase_ : List[str] = tf.convert_to_tensor(
[8.22_2099, 7.353_4126, 8.43_2078, 7.440_2075, 9.3_8451, 6.27_1159, 8.82_7531, 5.440_2995, 7.385_7956, 9.67_7023] , dtype=tf.floataa , ) # expected non filtered values as noted above
lowerCAmelCase_ : Dict = tf_top_k_top_p_filtering(UpperCAmelCase , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
lowerCAmelCase_ : Union[str, Any] = output[output != -float("""inf""" )]
lowerCAmelCase_ : Tuple = tf.cast(
tf.where(tf.not_equal(UpperCAmelCase , tf.constant(-float("""inf""" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(UpperCAmelCase , UpperCAmelCase , rtol=1e-1_2 )
tf.debugging.assert_equal(UpperCAmelCase , UpperCAmelCase )
@require_tf
class __a ( unittest.TestCase ,__UpperCamelCase ):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
__snake_case : Optional[Any] = {
"""AutoModelForCausalLM""": TFAutoModelForCausalLM,
"""AutoModelForSpeechSeq2Seq""": TFAutoModelForSpeechSeqaSeq,
"""AutoModelForSeq2SeqLM""": TFAutoModelForSeqaSeqLM,
"""AutoModelForVision2Seq""": TFAutoModelForVisionaSeq,
"""LogitsProcessorList""": TFLogitsProcessorList,
"""MinLengthLogitsProcessor""": TFMinLengthLogitsProcessor,
"""create_tensor_fn""": tf.convert_to_tensor,
"""floats_tensor""": floats_tensor,
"""return_tensors""": """tf""",
}
@slow
def A ( self : str ):
# TF-only test: tf.saved_model export
lowerCAmelCase_ : Union[str, Any] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
lowerCAmelCase_ : Tuple = 2
lowerCAmelCase_ : Dict = 2
class __a ( tf.Module ):
def __init__( self : List[str] , UpperCAmelCase : int ):
super(UpperCAmelCase , self ).__init__()
lowerCAmelCase_ : int = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((None, input_length) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=UpperCAmelCase , )
def A ( self : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Dict ):
lowerCAmelCase_ : str = self.model.generate(
input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase , max_new_tokens=UpperCAmelCase , return_dict_in_generate=UpperCAmelCase , )
return {"sequences": outputs["sequences"]}
lowerCAmelCase_ : Any = [[2, 0], [1_02, 1_03]]
lowerCAmelCase_ : Optional[Any] = [[1, 0], [1, 1]]
lowerCAmelCase_ : List[str] = DummyModel(model=UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(UpperCAmelCase , UpperCAmelCase , signatures={"""serving_default""": dummy_model.serving} )
lowerCAmelCase_ : Union[str, Any] = tf.saved_model.load(UpperCAmelCase ).signatures["""serving_default"""]
for batch_size in range(1 , len(UpperCAmelCase ) + 1 ):
lowerCAmelCase_ : Tuple = {
"""input_ids""": tf.constant(dummy_input_ids[:batch_size] ),
"""attention_mask""": tf.constant(dummy_attention_masks[:batch_size] ),
}
lowerCAmelCase_ : Optional[int] = serving_func(**UpperCAmelCase )["""sequences"""]
lowerCAmelCase_ : Dict = test_model.generate(**UpperCAmelCase , max_new_tokens=UpperCAmelCase )
tf.debugging.assert_equal(UpperCAmelCase , UpperCAmelCase )
@slow
def A ( self : Dict ):
# TF-only test: tf.saved_model export
lowerCAmelCase_ : Dict = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
lowerCAmelCase_ : List[Any] = 1
lowerCAmelCase_ : Dict = 2
class __a ( tf.Module ):
def __init__( self : str , UpperCAmelCase : List[str] ):
super(UpperCAmelCase , self ).__init__()
lowerCAmelCase_ : int = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((batch_size, None) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=UpperCAmelCase , )
def A ( self : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[Any] ):
lowerCAmelCase_ : List[str] = self.model.generate(
input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase , max_new_tokens=UpperCAmelCase , return_dict_in_generate=UpperCAmelCase , )
return {"sequences": outputs["sequences"]}
lowerCAmelCase_ : Dict = [[2], [1_02, 1_03]]
lowerCAmelCase_ : Union[str, Any] = [[1], [1, 1]]
lowerCAmelCase_ : Tuple = DummyModel(model=UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(UpperCAmelCase , UpperCAmelCase , signatures={"""serving_default""": dummy_model.serving} )
lowerCAmelCase_ : Union[str, Any] = tf.saved_model.load(UpperCAmelCase ).signatures["""serving_default"""]
for input_row in range(len(UpperCAmelCase ) ):
lowerCAmelCase_ : Dict = {
"""input_ids""": tf.constant([dummy_input_ids[input_row]] ),
"""attention_mask""": tf.constant([dummy_attention_masks[input_row]] ),
}
lowerCAmelCase_ : Tuple = serving_func(**UpperCAmelCase )["""sequences"""]
lowerCAmelCase_ : Union[str, Any] = test_model.generate(**UpperCAmelCase , max_new_tokens=UpperCAmelCase )
tf.debugging.assert_equal(UpperCAmelCase , UpperCAmelCase )
@slow
@require_tensorflow_text
def A ( self : List[Any] ):
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id="""google/flan-t5-small""" , filename="""spiece.model""" , local_dir=UpperCAmelCase )
class __a ( tf.keras.layers.Layer ):
def __init__( self : Optional[int] ):
super().__init__()
lowerCAmelCase_ : Dict = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(UpperCAmelCase , """spiece.model""" ) , """rb""" ).read() )
lowerCAmelCase_ : str = TFAutoModelForSeqaSeqLM.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
def A ( self : Any , UpperCAmelCase : Optional[int] , *UpperCAmelCase : Tuple , **UpperCAmelCase : str ):
lowerCAmelCase_ : List[Any] = self.tokenizer.tokenize(UpperCAmelCase )
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = text.pad_model_inputs(
UpperCAmelCase , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
lowerCAmelCase_ : Optional[Any] = self.model.generate(input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase )
return self.tokenizer.detokenize(UpperCAmelCase )
lowerCAmelCase_ : Optional[Any] = CompleteSentenceTransformer()
lowerCAmelCase_ : Dict = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="""inputs""" )
lowerCAmelCase_ : List[str] = complete_model(UpperCAmelCase )
lowerCAmelCase_ : Tuple = tf.keras.Model(UpperCAmelCase , UpperCAmelCase )
keras_model.save(UpperCAmelCase )
def A ( self : List[Any] ):
# Has PT equivalent: this test relies on random sampling
lowerCAmelCase_ : Union[str, Any] = {
"""do_sample""": True,
"""num_beams""": 1,
"""top_p""": 0.7,
"""top_k""": 10,
"""temperature""": 0.7,
}
lowerCAmelCase_ : Union[str, Any] = 14
lowerCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
lowerCAmelCase_ : str = """Hello, my dog is cute and"""
lowerCAmelCase_ : Dict = tokenizer(UpperCAmelCase , return_tensors="""tf""" )
lowerCAmelCase_ : Dict = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
lowerCAmelCase_ : Union[str, Any] = 6_38
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
lowerCAmelCase_ : Optional[int] = model.generate(**UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
self.assertTrue(expectation == len(generated_tokens[0] ) )
lowerCAmelCase_ : Tuple = [6_38, 1_98]
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
lowerCAmelCase_ : List[str] = model.generate(**UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def A ( self : int ):
# Has PT equivalent: ample use of framework-specific code
lowerCAmelCase_ : int = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
lowerCAmelCase_ : Dict = """Hugging Face is a technology company based in New York and Paris."""
lowerCAmelCase_ : Union[str, Any] = bart_tokenizer(UpperCAmelCase , return_tensors="""tf""" ).input_ids
lowerCAmelCase_ : Dict = TFBartForConditionalGeneration.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
lowerCAmelCase_ : Tuple = bart_model.generate(UpperCAmelCase ).numpy()
class __a ( __UpperCamelCase ):
def A ( self : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : str=None , **UpperCAmelCase : int ):
return super().call(UpperCAmelCase , **UpperCAmelCase )
lowerCAmelCase_ : Any = FakeBart.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
lowerCAmelCase_ : List[str] = bart_model.generate(UpperCAmelCase , foo="""bar""" ).numpy()
self.assertTrue(np.array_equal(UpperCAmelCase , UpperCAmelCase ) )
class __a ( bart_model.model.encoder.__class__ ):
def A ( self : Tuple , UpperCAmelCase : int , **UpperCAmelCase : str ):
return super().call(UpperCAmelCase , **UpperCAmelCase )
lowerCAmelCase_ : Tuple = FakeEncoder(bart_model.config , bart_model.model.shared )
lowerCAmelCase_ : List[str] = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
lowerCAmelCase_ : Any = bart_model.generate(UpperCAmelCase ).numpy()
with self.assertRaises(UpperCAmelCase ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(UpperCAmelCase , foo="""bar""" )
| 600
| 0
|
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
UpperCamelCase_ = 50000
UpperCamelCase_ = 5000
UpperCamelCase_ ,UpperCamelCase_ = os.path.split(__file__)
UpperCamelCase_ = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def lowerCamelCase_ ( _a : datasets.Dataset , _a : int ):
'''simple docstring'''
for i in range(_a ):
UpperCAmelCase_ : List[str] = dataset[i]
@get_duration
def lowerCamelCase_ ( _a : datasets.Dataset , _a : Union[str, Any] , _a : int ):
'''simple docstring'''
for i in range(0 , len(_a ) , _a ):
UpperCAmelCase_ : Any = dataset[i : i + batch_size]
@get_duration
def lowerCamelCase_ ( _a : datasets.Dataset , _a : Union[str, Any] , _a : str ):
'''simple docstring'''
with dataset.formatted_as(type=_a ):
for i in range(_a ):
UpperCAmelCase_ : int = dataset[i]
@get_duration
def lowerCamelCase_ ( _a : datasets.Dataset , _a : Optional[Any] , _a : Tuple , _a : Optional[Any] ):
'''simple docstring'''
with dataset.formatted_as(type=_a ):
for i in range(0 , _a , _a ):
UpperCAmelCase_ : Any = dataset[i : i + batch_size]
def lowerCamelCase_ ( ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = {"""num examples""": SPEED_TEST_N_EXAMPLES}
UpperCAmelCase_ : List[str] = [
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 100}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1000}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """pandas""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """torch""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """tensorflow""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1000}),
]
UpperCAmelCase_ : List[Any] = [
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 100}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1000}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print("""generating dataset""" )
UpperCAmelCase_ : Optional[Any] = datasets.Features(
{"""list""": datasets.Sequence(datasets.Value("""float32""" ) ), """numbers""": datasets.Value("""float32""" )} )
UpperCAmelCase_ : Optional[Any] = generate_example_dataset(
os.path.join(_a , """dataset.arrow""" ) , _a , num_examples=_a , seq_shapes={"""list""": (100,)} , )
print("""first set of iterations""" )
for func, kwargs in functions:
print(func.__name__ , str(_a ) )
UpperCAmelCase_ : str = func(_a , **_a )
print("""shuffling dataset""" )
UpperCAmelCase_ : int = dataset.shuffle()
print("""Second set of iterations (after shuffling""" )
for func, kwargs in functions_shuffled:
print("""shuffled """ , func.__name__ , str(_a ) )
UpperCAmelCase_ : Any = func(
_a , **_a )
with open(_a , """wb""" ) as f:
f.write(json.dumps(_a ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 322
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase_ = {
'''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''],
'''tokenization_tapas''': ['''TapasTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TapasForMaskedLM''',
'''TapasForQuestionAnswering''',
'''TapasForSequenceClassification''',
'''TapasModel''',
'''TapasPreTrainedModel''',
'''load_tf_weights_in_tapas''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFTapasForMaskedLM''',
'''TFTapasForQuestionAnswering''',
'''TFTapasForSequenceClassification''',
'''TFTapasModel''',
'''TFTapasPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 322
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_a : Tuple = {
"configuration_blip": [
"BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlipConfig",
"BlipTextConfig",
"BlipVisionConfig",
],
"processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Union[str, Any] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Union[str, Any] = [
"BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlipModel",
"BlipPreTrainedModel",
"BlipForConditionalGeneration",
"BlipForQuestionAnswering",
"BlipVisionModel",
"BlipTextModel",
"BlipForImageTextRetrieval",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : int = [
"TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBlipModel",
"TFBlipPreTrainedModel",
"TFBlipForConditionalGeneration",
"TFBlipForQuestionAnswering",
"TFBlipVisionModel",
"TFBlipTextModel",
"TFBlipForImageTextRetrieval",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
_a : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 56
|
'''simple docstring'''
import argparse
A__ : Optional[Any] = """docs/source/_static/js/custom.js"""
def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[int] ) -> int:
with open(UpperCAmelCase_ , encoding='utf-8' , newline='\n' ) as f:
__lowerCamelCase : Dict = f.readlines()
__lowerCamelCase : Tuple = 0
# First let's put the right version
while not lines[index].startswith('const stableVersion =' ):
index += 1
__lowerCamelCase : Dict = F'const stableVersion = "v{version}"\n'
# Then update the dictionary
while not lines[index].startswith('const versionMapping = {' ):
index += 1
# We go until the end
while not lines[index].startswith('}' ):
index += 1
# We add the new version at the end
lines[index - 1] += F' "v{version}": "v{version}",\n'
with open(UpperCAmelCase_ , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(UpperCAmelCase_ )
if __name__ == "__main__":
A__ : str = argparse.ArgumentParser()
parser.add_argument("""--version""", help="""Release version.""")
A__ : Any = parser.parse_args()
update_custom_js(args.version)
| 13
| 0
|
"""simple docstring"""
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> float:
"""simple docstring"""
_UpperCamelCase : int = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
def lowercase__ ( ) -> Any:
"""simple docstring"""
print(sum_of_series(1 ,1 ,10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 51
|
"""simple docstring"""
def lowercase__ ( lowercase_ ,lowercase_ ) -> None:
"""simple docstring"""
_UpperCamelCase : List[Any] = len(lowercase_ )
print("The following activities are selected:" )
# The first activity is always selected
_UpperCamelCase : List[Any] = 0
print(lowercase_ ,end="," )
# Consider rest of the activities
for j in range(lowercase_ ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(lowercase_ ,end="," )
_UpperCamelCase : Optional[Any] = j
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ = [1, 3, 0, 5, 8, 5]
lowerCamelCase__ = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 51
| 1
|
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
'b0': efficientnet.EfficientNetBa,
'b1': efficientnet.EfficientNetBa,
'b2': efficientnet.EfficientNetBa,
'b3': efficientnet.EfficientNetBa,
'b4': efficientnet.EfficientNetBa,
'b5': efficientnet.EfficientNetBa,
'b6': efficientnet.EfficientNetBa,
'b7': efficientnet.EfficientNetBa,
}
SCREAMING_SNAKE_CASE = {
'b0': {
'hidden_dim': 1_280,
'width_coef': 1.0,
'depth_coef': 1.0,
'image_size': 224,
'dropout_rate': 0.2,
'dw_padding': [],
},
'b1': {
'hidden_dim': 1_280,
'width_coef': 1.0,
'depth_coef': 1.1,
'image_size': 240,
'dropout_rate': 0.2,
'dw_padding': [16],
},
'b2': {
'hidden_dim': 1_408,
'width_coef': 1.1,
'depth_coef': 1.2,
'image_size': 260,
'dropout_rate': 0.3,
'dw_padding': [5, 8, 16],
},
'b3': {
'hidden_dim': 1_536,
'width_coef': 1.2,
'depth_coef': 1.4,
'image_size': 300,
'dropout_rate': 0.3,
'dw_padding': [5, 18],
},
'b4': {
'hidden_dim': 1_792,
'width_coef': 1.4,
'depth_coef': 1.8,
'image_size': 380,
'dropout_rate': 0.4,
'dw_padding': [6],
},
'b5': {
'hidden_dim': 2_048,
'width_coef': 1.6,
'depth_coef': 2.2,
'image_size': 456,
'dropout_rate': 0.4,
'dw_padding': [13, 27],
},
'b6': {
'hidden_dim': 2_304,
'width_coef': 1.8,
'depth_coef': 2.6,
'image_size': 528,
'dropout_rate': 0.5,
'dw_padding': [31],
},
'b7': {
'hidden_dim': 2_560,
'width_coef': 2.0,
'depth_coef': 3.1,
'image_size': 600,
'dropout_rate': 0.5,
'dw_padding': [18],
},
}
def lowercase_ ( __A : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase : int =EfficientNetConfig()
lowercase : Optional[int] =CONFIG_MAP[model_name]['''hidden_dim''']
lowercase : Dict =CONFIG_MAP[model_name]['''width_coef''']
lowercase : Optional[Any] =CONFIG_MAP[model_name]['''depth_coef''']
lowercase : List[str] =CONFIG_MAP[model_name]['''image_size''']
lowercase : str =CONFIG_MAP[model_name]['''dropout_rate''']
lowercase : Any =CONFIG_MAP[model_name]['''dw_padding''']
lowercase : Optional[int] ='''huggingface/label-files'''
lowercase : Tuple ='''imagenet-1k-id2label.json'''
lowercase : Union[str, Any] =1_0_0_0
lowercase : List[Any] =json.load(open(hf_hub_download(__A , __A , repo_type='''dataset''' ) , '''r''' ) )
lowercase : str ={int(__A ): v for k, v in idalabel.items()}
lowercase : Dict =idalabel
lowercase : Tuple ={v: k for k, v in idalabel.items()}
return config
def lowercase_ ( ) -> Dict:
"""simple docstring"""
lowercase : str ='''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase : Tuple =Image.open(requests.get(__A , stream=__A ).raw )
return im
def lowercase_ ( __A : int ) -> Dict:
"""simple docstring"""
lowercase : Optional[Any] =CONFIG_MAP[model_name]['''image_size''']
lowercase : Tuple =EfficientNetImageProcessor(
size={'''height''': size, '''width''': size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=__A , )
return preprocessor
def lowercase_ ( __A : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowercase : Union[str, Any] =[v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )]
lowercase : Any =sorted(set(__A ) )
lowercase : Dict =len(__A )
lowercase : Dict ={b: str(__A ) for b, i in zip(__A , range(__A ) )}
lowercase : str =[]
rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
for b in block_names:
lowercase : List[Any] =block_name_mapping[b]
rename_keys.append((F'block{b}_expand_conv/kernel:0', F'encoder.blocks.{hf_b}.expansion.expand_conv.weight') )
rename_keys.append((F'block{b}_expand_bn/gamma:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.weight') )
rename_keys.append((F'block{b}_expand_bn/beta:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.bias') )
rename_keys.append(
(F'block{b}_expand_bn/moving_mean:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.running_mean') )
rename_keys.append(
(F'block{b}_expand_bn/moving_variance:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.running_var') )
rename_keys.append(
(F'block{b}_dwconv/depthwise_kernel:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight') )
rename_keys.append((F'block{b}_bn/gamma:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight') )
rename_keys.append((F'block{b}_bn/beta:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias') )
rename_keys.append(
(F'block{b}_bn/moving_mean:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean') )
rename_keys.append(
(F'block{b}_bn/moving_variance:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var') )
rename_keys.append((F'block{b}_se_reduce/kernel:0', F'encoder.blocks.{hf_b}.squeeze_excite.reduce.weight') )
rename_keys.append((F'block{b}_se_reduce/bias:0', F'encoder.blocks.{hf_b}.squeeze_excite.reduce.bias') )
rename_keys.append((F'block{b}_se_expand/kernel:0', F'encoder.blocks.{hf_b}.squeeze_excite.expand.weight') )
rename_keys.append((F'block{b}_se_expand/bias:0', F'encoder.blocks.{hf_b}.squeeze_excite.expand.bias') )
rename_keys.append(
(F'block{b}_project_conv/kernel:0', F'encoder.blocks.{hf_b}.projection.project_conv.weight') )
rename_keys.append((F'block{b}_project_bn/gamma:0', F'encoder.blocks.{hf_b}.projection.project_bn.weight') )
rename_keys.append((F'block{b}_project_bn/beta:0', F'encoder.blocks.{hf_b}.projection.project_bn.bias') )
rename_keys.append(
(F'block{b}_project_bn/moving_mean:0', F'encoder.blocks.{hf_b}.projection.project_bn.running_mean') )
rename_keys.append(
(F'block{b}_project_bn/moving_variance:0', F'encoder.blocks.{hf_b}.projection.project_bn.running_var') )
rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )
lowercase : str ={}
for item in rename_keys:
if item[0] in original_param_names:
lowercase : Optional[Any] ='''efficientnet.''' + item[1]
lowercase : str ='''classifier.weight'''
lowercase : Optional[Any] ='''classifier.bias'''
return key_mapping
def lowercase_ ( __A : Any , __A : Dict , __A : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
for key, value in tf_params.items():
if "normalization" in key:
continue
lowercase : Union[str, Any] =key_mapping[key]
if "_conv" in key and "kernel" in key:
lowercase : Dict =torch.from_numpy(__A ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
lowercase : Optional[int] =torch.from_numpy(__A ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
lowercase : Optional[Any] =torch.from_numpy(np.transpose(__A ) )
else:
lowercase : str =torch.from_numpy(__A )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(__A )
@torch.no_grad()
def lowercase_ ( __A : Dict , __A : str , __A : List[Any] , __A : int ) -> List[Any]:
"""simple docstring"""
lowercase : Optional[Any] =model_classes[model_name](
include_top=__A , weights='''imagenet''' , input_tensor=__A , input_shape=__A , pooling=__A , classes=1_0_0_0 , classifier_activation='''softmax''' , )
lowercase : Union[str, Any] =original_model.trainable_variables
lowercase : str =original_model.non_trainable_variables
lowercase : Union[str, Any] ={param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
lowercase : Dict =param.numpy()
lowercase : Any =list(tf_params.keys() )
# Load HuggingFace model
lowercase : Optional[Any] =get_efficientnet_config(__A )
lowercase : str =EfficientNetForImageClassification(__A ).eval()
lowercase : str =hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print('''Converting parameters...''' )
lowercase : Optional[int] =rename_keys(__A )
replace_params(__A , __A , __A )
# Initialize preprocessor and preprocess input image
lowercase : Optional[int] =convert_image_processor(__A )
lowercase : List[Any] =preprocessor(images=prepare_img() , return_tensors='''pt''' )
# HF model inference
hf_model.eval()
with torch.no_grad():
lowercase : Any =hf_model(**__A )
lowercase : str =outputs.logits.detach().numpy()
# Original model inference
lowercase : List[str] =False
lowercase : int =CONFIG_MAP[model_name]['''image_size''']
lowercase : str =prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
lowercase : Dict =image.img_to_array(__A )
lowercase : str =np.expand_dims(__A , axis=0 )
lowercase : Tuple =original_model.predict(__A )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(__A , __A , atol=1E-3 ), "The predicted logits are not the same."
print('''Model outputs match!''' )
if save_model:
# Create folder to save model
if not os.path.isdir(__A ):
os.mkdir(__A )
# Save converted model and image processor
hf_model.save_pretrained(__A )
preprocessor.save_pretrained(__A )
if push_to_hub:
# Push model and image processor to hub
print(F'Pushing converted {model_name} to the hub...' )
lowercase : Optional[Any] =F'efficientnet-{model_name}'
preprocessor.push_to_hub(__A )
hf_model.push_to_hub(__A )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='b0',
type=str,
help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='hf_model',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--save_model', action='store_true', help='Save model to local')
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 94
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _lowercase ( _UpperCAmelCase ):
"""simple docstring"""
lowerCAmelCase__ = (DEISMultistepScheduler,)
lowerCAmelCase__ = (('num_inference_steps', 25),)
def _UpperCAmelCase ( self , **UpperCAmelCase ):
'''simple docstring'''
_lowercase = {
"""num_train_timesteps""": 1000,
"""beta_start""": 0.0_001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""solver_order""": 2,
}
config.update(**UpperCAmelCase )
return config
def _UpperCAmelCase ( self , UpperCAmelCase=0 , **UpperCAmelCase ):
'''simple docstring'''
_lowercase = dict(self.forward_default_kwargs )
_lowercase = kwargs.pop("""num_inference_steps""" , UpperCAmelCase )
_lowercase = self.dummy_sample
_lowercase = 0.1 * sample
_lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_lowercase = self.get_scheduler_config(**UpperCAmelCase )
_lowercase = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals
_lowercase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase )
_lowercase = scheduler_class.from_pretrained(UpperCAmelCase )
new_scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals
_lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowercase , _lowercase = sample, sample
for t in range(UpperCAmelCase , time_step + scheduler.config.solver_order + 1 ):
_lowercase = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
_lowercase = new_scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _UpperCAmelCase ( self ):
'''simple docstring'''
pass
def _UpperCAmelCase ( self , UpperCAmelCase=0 , **UpperCAmelCase ):
'''simple docstring'''
_lowercase = dict(self.forward_default_kwargs )
_lowercase = kwargs.pop("""num_inference_steps""" , UpperCAmelCase )
_lowercase = self.dummy_sample
_lowercase = 0.1 * sample
_lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_lowercase = self.get_scheduler_config()
_lowercase = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
_lowercase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase )
_lowercase = scheduler_class.from_pretrained(UpperCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
_lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowercase = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
_lowercase = new_scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _UpperCAmelCase ( self , UpperCAmelCase=None , **UpperCAmelCase ):
'''simple docstring'''
if scheduler is None:
_lowercase = self.scheduler_classes[0]
_lowercase = self.get_scheduler_config(**UpperCAmelCase )
_lowercase = scheduler_class(**UpperCAmelCase )
_lowercase = self.scheduler_classes[0]
_lowercase = self.get_scheduler_config(**UpperCAmelCase )
_lowercase = scheduler_class(**UpperCAmelCase )
_lowercase = 10
_lowercase = self.dummy_model()
_lowercase = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
_lowercase = model(UpperCAmelCase , UpperCAmelCase )
_lowercase = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
return sample
def _UpperCAmelCase ( self ):
'''simple docstring'''
_lowercase = dict(self.forward_default_kwargs )
_lowercase = kwargs.pop("""num_inference_steps""" , UpperCAmelCase )
for scheduler_class in self.scheduler_classes:
_lowercase = self.get_scheduler_config()
_lowercase = scheduler_class(**UpperCAmelCase )
_lowercase = self.dummy_sample
_lowercase = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCAmelCase , """set_timesteps""" ):
scheduler.set_timesteps(UpperCAmelCase )
elif num_inference_steps is not None and not hasattr(UpperCAmelCase , """set_timesteps""" ):
_lowercase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
_lowercase = dummy_past_residuals[: scheduler.config.solver_order]
_lowercase = scheduler.timesteps[5]
_lowercase = scheduler.timesteps[6]
_lowercase = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
_lowercase = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_lowercase = DEISMultistepScheduler(**self.get_scheduler_config() )
_lowercase = self.full_loop(scheduler=UpperCAmelCase )
_lowercase = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.23_916 ) < 1e-3
_lowercase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_lowercase = DPMSolverMultistepScheduler.from_config(scheduler.config )
_lowercase = UniPCMultistepScheduler.from_config(scheduler.config )
_lowercase = DEISMultistepScheduler.from_config(scheduler.config )
_lowercase = self.full_loop(scheduler=UpperCAmelCase )
_lowercase = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.23_916 ) < 1e-3
def _UpperCAmelCase ( self ):
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase )
def _UpperCAmelCase ( self ):
'''simple docstring'''
self.check_over_configs(thresholding=UpperCAmelCase )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=UpperCAmelCase , prediction_type=UpperCAmelCase , sample_max_value=UpperCAmelCase , algorithm_type="""deis""" , solver_order=UpperCAmelCase , solver_type=UpperCAmelCase , )
def _UpperCAmelCase ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase )
def _UpperCAmelCase ( self ):
'''simple docstring'''
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=UpperCAmelCase , solver_type=UpperCAmelCase , prediction_type=UpperCAmelCase , algorithm_type=UpperCAmelCase , )
_lowercase = self.full_loop(
solver_order=UpperCAmelCase , solver_type=UpperCAmelCase , prediction_type=UpperCAmelCase , algorithm_type=UpperCAmelCase , )
assert not torch.isnan(UpperCAmelCase ).any(), "Samples have nan numbers"
def _UpperCAmelCase ( self ):
'''simple docstring'''
self.check_over_configs(lower_order_final=UpperCAmelCase )
self.check_over_configs(lower_order_final=UpperCAmelCase )
def _UpperCAmelCase ( self ):
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=UpperCAmelCase , time_step=0 )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_lowercase = self.full_loop()
_lowercase = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.23_916 ) < 1e-3
def _UpperCAmelCase ( self ):
'''simple docstring'''
_lowercase = self.full_loop(prediction_type="""v_prediction""" )
_lowercase = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.091 ) < 1e-3
def _UpperCAmelCase ( self ):
'''simple docstring'''
_lowercase = self.scheduler_classes[0]
_lowercase = self.get_scheduler_config(thresholding=UpperCAmelCase , dynamic_thresholding_ratio=0 )
_lowercase = scheduler_class(**UpperCAmelCase )
_lowercase = 10
_lowercase = self.dummy_model()
_lowercase = self.dummy_sample_deter.half()
scheduler.set_timesteps(UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
_lowercase = model(UpperCAmelCase , UpperCAmelCase )
_lowercase = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
assert sample.dtype == torch.floataa
| 398
| 0
|
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : Dict = logging.get_logger(__name__)
_UpperCAmelCase : List[str] = {
"""facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""",
"""facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""",
}
class a__ ( __A ):
"""simple docstring"""
__UpperCamelCase : int = 'encodec'
def __init__(self , __lowercase=[1.5, 3.0, 6.0, 1_2.0, 2_4.0] , __lowercase=2_40_00 , __lowercase=1 , __lowercase=False , __lowercase=None , __lowercase=None , __lowercase=1_28 , __lowercase=32 , __lowercase=1 , __lowercase=[8, 5, 4, 2] , __lowercase="weight_norm" , __lowercase=7 , __lowercase=7 , __lowercase=3 , __lowercase=2 , __lowercase=True , __lowercase="reflect" , __lowercase=2 , __lowercase=2 , __lowercase=1.0 , __lowercase=10_24 , __lowercase=None , __lowercase=True , **__lowercase , ):
__lowerCAmelCase = target_bandwidths
__lowerCAmelCase = sampling_rate
__lowerCAmelCase = audio_channels
__lowerCAmelCase = normalize
__lowerCAmelCase = chunk_length_s
__lowerCAmelCase = overlap
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_filters
__lowerCAmelCase = num_residual_layers
__lowerCAmelCase = upsampling_ratios
__lowerCAmelCase = norm_type
__lowerCAmelCase = kernel_size
__lowerCAmelCase = last_kernel_size
__lowerCAmelCase = residual_kernel_size
__lowerCAmelCase = dilation_growth_rate
__lowerCAmelCase = use_causal_conv
__lowerCAmelCase = pad_mode
__lowerCAmelCase = compress
__lowerCAmelCase = num_lstm_layers
__lowerCAmelCase = trim_right_ratio
__lowerCAmelCase = codebook_size
__lowerCAmelCase = codebook_dim if codebook_dim is not None else hidden_size
__lowerCAmelCase = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
F"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
super().__init__(**__lowercase )
@property
def _snake_case (self ):
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def _snake_case (self ):
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def _snake_case (self ):
__lowerCAmelCase = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def _snake_case (self ):
return int(10_00 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
| 474
|
'''simple docstring'''
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
_UpperCAmelCase : Tuple = """python tqdm regex requests packaging filelock numpy tokenizers""".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def __magic_name__( lowerCamelCase, lowerCamelCase=None):
require_version(deps[pkg], lowerCamelCase)
| 474
| 1
|
'''simple docstring'''
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
__A : str = logging.get_logger(__name__)
__A : Dict[Optional[str], Type[Formatter]] = {}
__A : Dict[Optional[str], str] = {}
__A : Dict[Optional[str], Exception] = {}
def UpperCamelCase_ ( A__ : type , A__ : Optional[str] , A__ : Optional[List[str]] = None , ):
'''simple docstring'''
lowerCAmelCase_ : str = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
f'Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})' )
lowerCAmelCase_ : Any = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
f'Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})' )
lowerCAmelCase_ : Any = format_type
def UpperCamelCase_ ( A__ : Exception , A__ : Optional[str] , A__ : Optional[List[str]] = None ):
'''simple docstring'''
lowerCAmelCase_ : Tuple = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
lowerCAmelCase_ : Optional[int] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
__A : List[str] = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
_register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
__A : List[str] = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
_register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, "jax", aliases=[])
else:
__A : str = ValueError("JAX needs to be installed to be able to return JAX arrays.")
_register_unavailable_formatter(_jax_error, "jax", aliases=[])
def UpperCamelCase_ ( A__ : Optional[str] ):
'''simple docstring'''
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def UpperCamelCase_ ( A__ : Optional[str] , **A__ : str ):
'''simple docstring'''
lowerCAmelCase_ : List[Any] = get_format_type_from_alias(A__ )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**A__ )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
f'Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'' )
| 275
|
'''simple docstring'''
def UpperCamelCase_ ( A__ : bytes ):
'''simple docstring'''
return "".join([hex(A__ )[2:].zfill(2 ).upper() for byte in list(A__ )] )
def UpperCamelCase_ ( A__ : str ):
'''simple docstring'''
if (len(A__ ) % 2) != 0:
raise ValueError(
"""Base16 encoded data is invalid:
Data does not have an even number of hex digits.""" )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(A__ ) <= set("""0123456789ABCDEF""" ):
raise ValueError(
"""Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.""" )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(A__ ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 275
| 1
|
from typing import List
from .keymap import KEYMAP, get_character
def lowerCamelCase (a_ :str) -> List[str]:
def decorator(a_ :int):
lowercase :int = getattr(a_ , '''handle_key''' , [])
handle += [key]
setattr(a_ , '''handle_key''' , a_)
return func
return decorator
def lowerCamelCase (*a_ :List[str]) -> List[Any]:
def decorator(a_ :str):
lowercase :int = getattr(a_ , '''handle_key''' , [])
handle += keys
setattr(a_ , '''handle_key''' , a_)
return func
return decorator
class __magic_name__ ( __UpperCAmelCase ):
def __new__( cls : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : str ):
'''simple docstring'''
lowercase :List[str] = super().__new__(cls , snake_case__ , snake_case__ , snake_case__ )
if not hasattr(snake_case__ , '''key_handler''' ):
setattr(snake_case__ , '''key_handler''' , {} )
setattr(snake_case__ , '''handle_input''' , KeyHandler.handle_input )
for value in attrs.values():
lowercase :Tuple = getattr(snake_case__ , '''handle_key''' , [] )
for key in handled_keys:
lowercase :str = value
return new_cls
@staticmethod
def __snake_case ( cls : int ):
'''simple docstring'''
lowercase :Any = get_character()
if char != KEYMAP["undefined"]:
lowercase :Optional[int] = ord(snake_case__ )
lowercase :Optional[Any] = cls.key_handler.get(snake_case__ )
if handler:
lowercase :str = char
return handler(cls )
else:
return None
def lowerCamelCase (cls :Any) -> List[str]:
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy())
| 703
|
"""simple docstring"""
def lowerCamelCase (a_ :int) -> str:
if number > 0:
raise ValueError('''input must be a negative integer''')
lowercase :Any = len(bin(a_)[3:])
lowercase :Any = bin(abs(a_) - (1 << binary_number_length))[3:]
lowercase :Tuple = (
(
'''1'''
+ '''0''' * (binary_number_length - len(a_))
+ twos_complement_number
)
if number < 0
else '''0'''
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 475
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
SCREAMING_SNAKE_CASE__ : List[str] = {
"""configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[str] = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 0
|
from __future__ import annotations
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
print(f'''Vertex\tShortest Distance from vertex {src}''' )
for i, d in enumerate(snake_case ):
print(f'''{i}\t\t{d}''' )
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
for j in range(snake_case ):
__magic_name__ , __magic_name__ , __magic_name__ :Tuple = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
return True
return False
def __lowercase ( snake_case, snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :List[Any] = [float('''inf''' )] * vertex_count
__magic_name__ :Tuple = 0.0
for _ in range(vertex_count - 1 ):
for j in range(snake_case ):
__magic_name__ , __magic_name__ , __magic_name__ :Dict = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
__magic_name__ :Tuple = distance[u] + w
__magic_name__ :Tuple = check_negative_cycle(snake_case, snake_case, snake_case )
if negative_cycle_exists:
raise Exception('''Negative cycle found''' )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE__ : Tuple = int(input("""Enter number of vertices: """).strip())
SCREAMING_SNAKE_CASE__ : Any = int(input("""Enter number of edges: """).strip())
SCREAMING_SNAKE_CASE__ : list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print("""Edge """, i + 1)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = (
int(x)
for x in input("""Enter source, destination, weight: """).strip().split(""" """)
)
SCREAMING_SNAKE_CASE__ : Dict = {"""src""": src, """dst""": dest, """weight""": weight}
SCREAMING_SNAKE_CASE__ : List[Any] = int(input("""\nEnter shortest path source:""").strip())
SCREAMING_SNAKE_CASE__ : List[str] = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 0
| 1
|
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
lowercase__ = [8, 5, 9, 7]
lowercase__ = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
lowercase__ = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ):
snake_case_ = claim_vector
snake_case_ = allocated_resources_table
snake_case_ = maximum_claim_table
def _lowercase ( self ):
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def _lowercase ( self ):
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def _lowercase ( self ):
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(UpperCAmelCase_ ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def _lowercase ( self ):
return {self.__need().index(UpperCAmelCase_ ): i for i in self.__need()}
def _lowercase ( self , **UpperCAmelCase_ ):
snake_case_ = self.__need()
snake_case_ = self.__allocated_resources_table
snake_case_ = self.__available_resources()
snake_case_ = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("_" * 50 + "\n" )
while need_list:
snake_case_ = False
for each_need in need_list:
snake_case_ = True
for index, need in enumerate(UpperCAmelCase_ ):
if need > available_resources[index]:
snake_case_ = False
break
if execution:
snake_case_ = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
snake_case_ = original_need_index
print(f'''Process {process_number + 1} is executing.''' )
# remove the process run from stack
need_list.remove(UpperCAmelCase_ )
# update available/freed resources stack
snake_case_ = np.array(UpperCAmelCase_ ) + np.array(
alloc_resources_table[process_number] )
print(
"Updated available resource stack for processes: "
+ " ".join([str(UpperCAmelCase_ ) for x in available_resources] ) )
break
if safe:
print("The process is in a safe state.\n" )
else:
print("System in unsafe state. Aborting...\n" )
break
def _lowercase ( self ):
print(" " * 9 + "Allocated Resource Table" )
for item in self.__allocated_resources_table:
print(
f'''P{self.__allocated_resources_table.index(UpperCAmelCase_ ) + 1}'''
+ " ".join(f'''{it:>8}''' for it in item )
+ "\n" )
print(" " * 9 + "System Resource Table" )
for item in self.__maximum_claim_table:
print(
f'''P{self.__maximum_claim_table.index(UpperCAmelCase_ ) + 1}'''
+ " ".join(f'''{it:>8}''' for it in item )
+ "\n" )
print(
"Current Usage by Active Processes: "
+ " ".join(str(UpperCAmelCase_ ) for x in self.__claim_vector ) )
print(
"Initial Available Resources: "
+ " ".join(str(UpperCAmelCase_ ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 721
|
'''simple docstring'''
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
'''artists_file''': '''artists.json''',
'''lyrics_file''': '''lyrics.json''',
'''genres_file''': '''genres.json''',
}
lowercase__ = {
'''artists_file''': {
'''jukebox''': '''https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json''',
},
'''genres_file''': {
'''jukebox''': '''https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json''',
},
'''lyrics_file''': {
'''jukebox''': '''https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json''',
},
}
lowercase__ = {
'''jukebox''': 5_12,
}
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
snake_case = VOCAB_FILES_NAMES
snake_case = PRETRAINED_VOCAB_FILES_MAP
snake_case = PRETRAINED_LYRIC_TOKENS_SIZES
snake_case = ["""input_ids""", """attention_mask"""]
def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=["v3", "v2", "v2"] , UpperCAmelCase_=5_12 , UpperCAmelCase_=5 , UpperCAmelCase_="<|endoftext|>" , **UpperCAmelCase_ , ):
snake_case_ = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else unk_token
super().__init__(
unk_token=UpperCAmelCase_ , n_genres=UpperCAmelCase_ , version=UpperCAmelCase_ , max_n_lyric_tokens=UpperCAmelCase_ , **UpperCAmelCase_ , )
snake_case_ = version
snake_case_ = max_n_lyric_tokens
snake_case_ = n_genres
with open(UpperCAmelCase_ , encoding="utf-8" ) as vocab_handle:
snake_case_ = json.load(UpperCAmelCase_ )
with open(UpperCAmelCase_ , encoding="utf-8" ) as vocab_handle:
snake_case_ = json.load(UpperCAmelCase_ )
with open(UpperCAmelCase_ , encoding="utf-8" ) as vocab_handle:
snake_case_ = json.load(UpperCAmelCase_ )
snake_case_ = R"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
# In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
if len(self.lyrics_encoder ) == 79:
snake_case_ = oov.replace(R"\-'" , R"\-+'" )
snake_case_ = regex.compile(UpperCAmelCase_ )
snake_case_ = {v: k for k, v in self.artists_encoder.items()}
snake_case_ = {v: k for k, v in self.genres_encoder.items()}
snake_case_ = {v: k for k, v in self.lyrics_encoder.items()}
@property
def _lowercase ( self ):
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def _lowercase ( self ):
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
def _lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = [self.artists_encoder.get(UpperCAmelCase_ , 0 ) for artist in list_artists]
for genres in range(len(UpperCAmelCase_ ) ):
snake_case_ = [self.genres_encoder.get(UpperCAmelCase_ , 0 ) for genre in list_genres[genres]]
snake_case_ = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
snake_case_ = [[self.lyrics_encoder.get(UpperCAmelCase_ , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def _lowercase ( self , UpperCAmelCase_ ):
return list(UpperCAmelCase_ )
def _lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ):
snake_case_ , snake_case_ , snake_case_ = self.prepare_for_tokenization(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = self._tokenize(UpperCAmelCase_ )
return artist, genre, lyrics
def _lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = False ):
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
snake_case_ = artists[idx].lower()
snake_case_ = [genres[idx].lower()]
else:
snake_case_ = self._normalize(artists[idx] ) + ".v2"
snake_case_ = [
self._normalize(UpperCAmelCase_ ) + ".v2" for genre in genres[idx].split("_" )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
snake_case_ = regex.compile(R"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+" )
snake_case_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
snake_case_ = {vocab[index]: index + 1 for index in range(len(UpperCAmelCase_ ) )}
snake_case_ = 0
snake_case_ = len(UpperCAmelCase_ ) + 1
snake_case_ = self.vocab
snake_case_ = {v: k for k, v in self.vocab.items()}
snake_case_ = ""
else:
snake_case_ = regex.compile(R"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+" )
snake_case_ = self._run_strip_accents(UpperCAmelCase_ )
snake_case_ = lyrics.replace("\\" , "\n" )
snake_case_ = self.out_of_vocab.sub("" , UpperCAmelCase_ ), [], []
return artists, genres, lyrics
def _lowercase ( self , UpperCAmelCase_ ):
snake_case_ = unicodedata.normalize("NFD" , UpperCAmelCase_ )
snake_case_ = []
for char in text:
snake_case_ = unicodedata.category(UpperCAmelCase_ )
if cat == "Mn":
continue
output.append(UpperCAmelCase_ )
return "".join(UpperCAmelCase_ )
def _lowercase ( self , UpperCAmelCase_ ):
snake_case_ = (
[chr(UpperCAmelCase_ ) for i in range(ord("a" ) , ord("z" ) + 1 )]
+ [chr(UpperCAmelCase_ ) for i in range(ord("A" ) , ord("Z" ) + 1 )]
+ [chr(UpperCAmelCase_ ) for i in range(ord("0" ) , ord("9" ) + 1 )]
+ ["."]
)
snake_case_ = frozenset(UpperCAmelCase_ )
snake_case_ = re.compile(R"_+" )
snake_case_ = "".join([c if c in accepted else "_" for c in text.lower()] )
snake_case_ = pattern.sub("_" , UpperCAmelCase_ ).strip("_" )
return text
def _lowercase ( self , UpperCAmelCase_ ):
return " ".join(UpperCAmelCase_ )
def _lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = False ):
# Convert to TensorType
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = TensorType(UpperCAmelCase_ )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
"Unable to convert output to TensorFlow tensors format, TensorFlow is not installed." )
import tensorflow as tf
snake_case_ = tf.constant
snake_case_ = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed." )
import torch
snake_case_ = torch.tensor
snake_case_ = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed." )
import jax.numpy as jnp # noqa: F811
snake_case_ = jnp.array
snake_case_ = _is_jax
else:
snake_case_ = np.asarray
snake_case_ = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
snake_case_ = [inputs]
if not is_tensor(UpperCAmelCase_ ):
snake_case_ = as_tensor(UpperCAmelCase_ )
except: # noqa E722
raise ValueError(
"Unable to create tensor, you should probably activate truncation and/or padding "
"with 'padding=True' 'truncation=True' to have batched tensors with the same length." )
return inputs
def __call__( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_="" , UpperCAmelCase_="pt" ):
snake_case_ = [0, 0, 0]
snake_case_ = [artist] * len(self.version )
snake_case_ = [genres] * len(self.version )
snake_case_ , snake_case_ , snake_case_ = self.tokenize(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ , snake_case_ , snake_case_ = self._convert_token_to_id(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = [-INFINITY] * len(full_tokens[-1] )
snake_case_ = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=UpperCAmelCase_ )
for i in range(len(self.version ) )
]
return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks} )
def _lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ):
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case_ = os.path.join(
UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"] )
with open(UpperCAmelCase_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=UpperCAmelCase_ ) )
snake_case_ = os.path.join(
UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"] )
with open(UpperCAmelCase_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=UpperCAmelCase_ ) )
snake_case_ = os.path.join(
UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"] )
with open(UpperCAmelCase_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=UpperCAmelCase_ ) )
return (artists_file, genres_file, lyrics_file)
def _lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = self.artists_decoder.get(UpperCAmelCase_ )
snake_case_ = [self.genres_decoder.get(UpperCAmelCase_ ) for genre in genres_index]
snake_case_ = [self.lyrics_decoder.get(UpperCAmelCase_ ) for character in lyric_index]
return artist, genres, lyrics
| 420
| 0
|
"""simple docstring"""
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
a :Optional[int] = {
# 1536-bit
5: {
'prime': int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
'generator': 2,
},
# 2048-bit
14: {
'prime': int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
'generator': 2,
},
# 3072-bit
15: {
'prime': int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
'generator': 2,
},
# 4096-bit
16: {
'prime': int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
'generator': 2,
},
# 6144-bit
17: {
'prime': int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
'generator': 2,
},
# 8192-bit
18: {
'prime': int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
'generator': 2,
},
}
class __a :
'''simple docstring'''
def __init__( self , _a = 14 ) -> None:
"""simple docstring"""
if group not in primes:
raise ValueError("""Unsupported Group""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = primes[group]["""prime"""]
SCREAMING_SNAKE_CASE__ : int = primes[group]["""generator"""]
SCREAMING_SNAKE_CASE__ : int = int(hexlify(urandom(32 ) ) , base=16 )
def _a ( self ) -> str:
"""simple docstring"""
return hex(self.__private_key )[2:]
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = pow(self.generator , self.__private_key , self.prime )
return hex(SCREAMING_SNAKE_CASE__ )[2:]
def _a ( self , _a ) -> bool:
"""simple docstring"""
return (
2 <= key <= self.prime - 2
and pow(SCREAMING_SNAKE_CASE__ , (self.prime - 1) // 2 , self.prime ) == 1
)
def _a ( self , _a ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = int(SCREAMING_SNAKE_CASE__ , base=16 )
if not self.is_valid_public_key(SCREAMING_SNAKE_CASE__ ):
raise ValueError("""Invalid public key""" )
SCREAMING_SNAKE_CASE__ : List[Any] = pow(SCREAMING_SNAKE_CASE__ , self.__private_key , self.prime )
return shaaaa(str(SCREAMING_SNAKE_CASE__ ).encode() ).hexdigest()
@staticmethod
def _a ( _a , _a ) -> bool:
"""simple docstring"""
return (
2 <= remote_public_key_str <= prime - 2
and pow(SCREAMING_SNAKE_CASE__ , (prime - 1) // 2 , SCREAMING_SNAKE_CASE__ ) == 1
)
@staticmethod
def _a ( _a , _a , _a = 14 ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = int(SCREAMING_SNAKE_CASE__ , base=16 )
SCREAMING_SNAKE_CASE__ : str = int(SCREAMING_SNAKE_CASE__ , base=16 )
SCREAMING_SNAKE_CASE__ : Tuple = primes[group]["""prime"""]
if not DiffieHellman.is_valid_public_key_static(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
raise ValueError("""Invalid public key""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = pow(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return shaaaa(str(SCREAMING_SNAKE_CASE__ ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680
|
"""simple docstring"""
def lowercase_ ( _snake_case ):
if not head:
return True
# split the list to two parts
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = head.next, head
while fast and fast.next:
SCREAMING_SNAKE_CASE__ : Tuple = fast.next.next
SCREAMING_SNAKE_CASE__ : Optional[int] = slow.next
SCREAMING_SNAKE_CASE__ : List[Any] = slow.next
SCREAMING_SNAKE_CASE__ : int = None # Don't forget here! But forget still works!
# reverse the second part
SCREAMING_SNAKE_CASE__ : int = None
while second:
SCREAMING_SNAKE_CASE__ : List[str] = second.next
SCREAMING_SNAKE_CASE__ : List[str] = node
SCREAMING_SNAKE_CASE__ : List[Any] = second
SCREAMING_SNAKE_CASE__ : List[Any] = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
SCREAMING_SNAKE_CASE__ : Optional[Any] = node.next
SCREAMING_SNAKE_CASE__ : Any = head.next
return True
def lowercase_ ( _snake_case ):
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = head
while fast and fast.next:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = fast.next.next, slow.next
# 2. Push the second half into the stack
SCREAMING_SNAKE_CASE__ : Optional[int] = [slow.val]
while slow.next:
SCREAMING_SNAKE_CASE__ : Any = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
SCREAMING_SNAKE_CASE__ : int = cur.next
return True
def lowercase_ ( _snake_case ):
if not head or not head.next:
return True
SCREAMING_SNAKE_CASE__ : Optional[Any] = {}
SCREAMING_SNAKE_CASE__ : Any = 0
while head:
if head.val in d:
d[head.val].append(_snake_case )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [pos]
SCREAMING_SNAKE_CASE__ : str = head.next
pos += 1
SCREAMING_SNAKE_CASE__ : Optional[int] = pos - 1
SCREAMING_SNAKE_CASE__ : Dict = 0
for v in d.values():
if len(_snake_case ) % 2 != 0:
middle += 1
else:
SCREAMING_SNAKE_CASE__ : str = 0
for i in range(0 ,len(_snake_case ) ):
if v[i] + v[len(_snake_case ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
| 223
| 0
|
def UpperCAmelCase__ (snake_case__ : Union[str, Any] = 50 ):
"""simple docstring"""
_snake_case : List[str] = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(F'''{solution() = }''')
| 715
|
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Dict ):
"""simple docstring"""
assert isinstance(snake_case__ , snake_case__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Dict ):
"""simple docstring"""
_snake_case : str = tmp_path / """cache"""
_snake_case : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_snake_case : str = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : int , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : str = tmp_path / """cache"""
_snake_case : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : List[Any] = features.copy() if features else default_expected_features
_snake_case : List[Any] = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_snake_case : Optional[Any] = ParquetDatasetReader(snake_case__ , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
_snake_case : List[str] = tmp_path / """cache"""
_snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : int = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , split=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : str , snake_case__ : str ):
"""simple docstring"""
if issubclass(snake_case__ , snake_case__ ):
_snake_case : Optional[Any] = parquet_path
elif issubclass(snake_case__ , snake_case__ ):
_snake_case : int = [parquet_path]
_snake_case : Union[str, Any] = tmp_path / """cache"""
_snake_case : Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : List[str] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : str=("train",) ):
"""simple docstring"""
assert isinstance(snake_case__ , snake_case__ )
for split in splits:
_snake_case : int = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : Tuple = tmp_path / """cache"""
_snake_case : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_snake_case : Tuple = ParquetDatasetReader(
{"""train""": parquet_path} , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : Optional[int] = tmp_path / """cache"""
_snake_case : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : Optional[Any] = features.copy() if features else default_expected_features
_snake_case : Dict = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_snake_case : Optional[int] = ParquetDatasetReader({"""train""": parquet_path} , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Tuple ):
"""simple docstring"""
if split:
_snake_case : int = {split: parquet_path}
else:
_snake_case : Optional[Any] = """train"""
_snake_case : int = {"""train""": parquet_path, """test""": parquet_path}
_snake_case : Dict = tmp_path / """cache"""
_snake_case : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_snake_case : Union[str, Any] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : Tuple ):
"""simple docstring"""
_snake_case : List[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_snake_case : str = pq.ParquetFile(tmp_path / """foo.parquet""" )
_snake_case : int = pf.read()
assert dataset.data.table == output_table
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
_snake_case : Optional[Any] = str(shared_datadir / """test_image_rgb.jpg""" )
_snake_case : Tuple = {"""image""": [image_path]}
_snake_case : Optional[int] = Features({"""image""": Image()} )
_snake_case : int = Dataset.from_dict(snake_case__ , features=snake_case__ )
_snake_case : Optional[Any] = ParquetDatasetWriter(snake_case__ , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_snake_case : List[str] = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) )
assert dataset.features == reloaded_dataset.features
_snake_case : Optional[Any] = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=snake_case__ ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"""feature, expected""" , [
(Features({"""foo""": Value("""int32""" )} ), None),
(Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : str ):
"""simple docstring"""
assert get_writer_batch_size(snake_case__ ) == expected
| 28
| 0
|
def _lowercase( __a : int ):
if num < 0:
return False
a__ =num
a__ =0
while num > 0:
a__ =rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20
|
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase_ (lowercase__ , unittest.TestCase ):
snake_case =KandinskyVaaPriorPipeline
snake_case =['prompt']
snake_case =['prompt', 'negative_prompt']
snake_case =[
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
snake_case =False
@property
def __UpperCamelCase ( self) -> Optional[int]:
return 32
@property
def __UpperCamelCase ( self) -> Tuple:
return 32
@property
def __UpperCamelCase ( self) -> int:
return self.time_input_dim
@property
def __UpperCamelCase ( self) -> str:
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self) -> Optional[int]:
return 100
@property
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
return tokenizer
@property
def __UpperCamelCase ( self) -> Union[str, Any]:
torch.manual_seed(0)
a__ =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(lowercase_)
@property
def __UpperCamelCase ( self) -> Tuple:
torch.manual_seed(0)
a__ ={
'num_attention_heads': 2,
'attention_head_dim': 12,
'embedding_dim': self.text_embedder_hidden_size,
'num_layers': 1,
}
a__ =PriorTransformer(**lowercase_)
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
a__ =nn.Parameter(torch.ones(model.clip_std.shape))
return model
@property
def __UpperCamelCase ( self) -> Any:
torch.manual_seed(0)
a__ =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
a__ =CLIPVisionModelWithProjection(lowercase_)
return model
@property
def __UpperCamelCase ( self) -> Optional[int]:
a__ =CLIPImageProcessor(
crop_size=224 , do_center_crop=lowercase_ , do_normalize=lowercase_ , do_resize=lowercase_ , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , )
return image_processor
def __UpperCamelCase ( self) -> Any:
a__ =self.dummy_prior
a__ =self.dummy_image_encoder
a__ =self.dummy_text_encoder
a__ =self.dummy_tokenizer
a__ =self.dummy_image_processor
a__ =UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1000 , clip_sample=lowercase_ , clip_sample_range=10.0 , )
a__ ={
'prior': prior,
'image_encoder': image_encoder,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'scheduler': scheduler,
'image_processor': image_processor,
}
return components
def __UpperCamelCase ( self , lowercase_ , lowercase_=0) -> Tuple:
if str(lowercase_).startswith('mps'):
a__ =torch.manual_seed(lowercase_)
else:
a__ =torch.Generator(device=lowercase_).manual_seed(lowercase_)
a__ ={
'prompt': 'horse',
'generator': generator,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def __UpperCamelCase ( self) -> int:
a__ ='cpu'
a__ =self.get_dummy_components()
a__ =self.pipeline_class(**lowercase_)
a__ =pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
a__ =pipe(**self.get_dummy_inputs(lowercase_))
a__ =output.image_embeds
a__ =pipe(
**self.get_dummy_inputs(lowercase_) , return_dict=lowercase_ , )[0]
a__ =image[0, -10:]
a__ =image_from_tuple[0, -10:]
assert image.shape == (1, 32)
a__ =np.array(
[-0.05_32, 1.71_20, 0.36_56, -1.08_52, -0.89_46, -1.17_56, 0.43_48, 0.24_82, 0.51_46, -0.11_56])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@skip_mps
def __UpperCamelCase ( self) -> List[Any]:
a__ =torch_device == 'cpu'
a__ =True
a__ =False
self._test_inference_batch_single_identical(
test_max_difference=lowercase_ , relax_max_difference=lowercase_ , test_mean_pixel_difference=lowercase_ , )
@skip_mps
def __UpperCamelCase ( self) -> Optional[int]:
a__ =torch_device == 'cpu'
a__ =False
self._test_attention_slicing_forward_pass(
test_max_difference=lowercase_ , test_mean_pixel_difference=lowercase_ , )
| 20
| 1
|
'''simple docstring'''
from __future__ import annotations
from math import ceil, floor, sqrt
def lowercase_ ( lowercase__ = 200_0000 ) ->int:
_snake_case: list[int] = [0]
_snake_case: int
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
_snake_case: int = 0
# the area corresponding to the grid that gives the product closest to target
_snake_case: int = 0
# an estimate of b, using the quadratic formula
_snake_case: float
# the largest integer less than b_estimate
_snake_case: int
# the largest integer less than b_estimate
_snake_case: int
# the triangle number corresponding to b_floor
_snake_case: int
# the triangle number corresponding to b_ceil
_snake_case: int
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
_snake_case: Any = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
_snake_case: str = floor(lowercase__ )
_snake_case: Union[str, Any] = ceil(lowercase__ )
_snake_case: Dict = triangle_numbers[b_floor]
_snake_case: Any = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
_snake_case: Dict = triangle_b_first_guess * triangle_a
_snake_case: Optional[Any] = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
_snake_case: Optional[int] = triangle_b_second_guess * triangle_a
_snake_case: Optional[int] = idx_a * b_ceil
return area
if __name__ == "__main__":
print(F'{solution() = }')
| 720
|
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
A : str = TypeVar('T')
class lowerCamelCase ( Generic[T] ):
_SCREAMING_SNAKE_CASE = 42 # Cache store of keys
_SCREAMING_SNAKE_CASE = 42 # References of the keys in cache
_SCREAMING_SNAKE_CASE = 10 # Maximum capacity of cache
def __init__( self : List[Any] , __snake_case : int ):
'''simple docstring'''
_snake_case: Dict = deque()
_snake_case: Union[str, Any] = set()
if not n:
_snake_case: Optional[int] = sys.maxsize
elif n < 0:
raise ValueError('n should be an integer greater than 0.' )
else:
_snake_case: Tuple = n
def SCREAMING_SNAKE_CASE_ ( self : Any , __snake_case : T ):
'''simple docstring'''
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
_snake_case: int = self.dq_store.pop()
self.key_reference.remove(__snake_case )
else:
self.dq_store.remove(__snake_case )
self.dq_store.appendleft(__snake_case )
self.key_reference.add(__snake_case )
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
for k in self.dq_store:
print(__snake_case )
def __repr__( self : List[Any] ):
'''simple docstring'''
return f'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
A : LRUCache[str | int] = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 273
| 0
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCamelCase_ ( lowerCamelCase ):
a__ = ['''image_processor''', '''tokenizer''']
a__ = '''LayoutLMv3ImageProcessor'''
a__ = ('''LayoutLMv3Tokenizer''', '''LayoutLMv3TokenizerFast''')
def __init__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :List[str] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __lowerCAmelCase , )
__magic_name__ :Union[str, Any] = kwargs.pop('''feature_extractor''' )
__magic_name__ :List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__lowerCAmelCase , __lowerCAmelCase )
def __call__( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = True , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = 0 , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = False , __lowerCAmelCase = False , __lowerCAmelCase = False , __lowerCAmelCase = False , __lowerCAmelCase = True , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
# first, apply the image processor
__magic_name__ :Union[str, Any] = self.image_processor(images=__lowerCAmelCase , return_tensors=__lowerCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
__magic_name__ :str = [text] # add batch dimension (as the image processor always adds a batch dimension)
__magic_name__ :Union[str, Any] = features['''words''']
__magic_name__ :Dict = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , stride=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_overflowing_tokens=__lowerCAmelCase , return_special_tokens_mask=__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , return_length=__lowerCAmelCase , verbose=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase , )
# add pixel values
__magic_name__ :List[str] = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
__magic_name__ :Any = self.get_overflowing_images(__lowerCAmelCase , encoded_inputs['''overflow_to_sample_mapping'''] )
__magic_name__ :List[str] = images
return encoded_inputs
def A ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
__magic_name__ :Optional[int] = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
F''' {len(__lowerCAmelCase )} and {len(__lowerCAmelCase )}''' )
return images_with_overflow
def A ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase )
def A ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase )
@property
def A ( self ):
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def A ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __lowerCAmelCase , )
return self.image_processor_class
@property
def A ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __lowerCAmelCase , )
return self.image_processor
| 0
|
"""simple docstring"""
import string
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ) -> str:
_lowerCAmelCase : int = """"""
for i in sequence:
_lowerCAmelCase : int = ord(_lowerCamelCase )
if 65 <= extract <= 90:
output += chr(155 - extract )
elif 97 <= extract <= 122:
output += chr(219 - extract )
else:
output += i
return output
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ) -> str:
_lowerCAmelCase : Union[str, Any] = string.ascii_letters
_lowerCAmelCase : Tuple = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(_lowerCamelCase )] if c in letters else c for c in sequence )
def SCREAMING_SNAKE_CASE ( ) -> None:
from timeit import timeit
print("""Running performance benchmarks...""" )
_lowerCAmelCase : List[str] = """from string import printable ; from __main__ import atbash, atbash_slow"""
print(f"> atbash_slow(): {timeit('atbash_slow(printable)' ,setup=_lowerCamelCase )} seconds" )
print(f"> atbash(): {timeit('atbash(printable)' ,setup=_lowerCamelCase )} seconds" )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(F"""{example} encrypted in atbash: {atbash(example)}""")
benchmark()
| 213
| 0
|
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE_ )
class A_(SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
a_ : str = field(default="""image-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
a_ : ClassVar[Features] = Features({"""image""": Image()} )
a_ : ClassVar[Features] = Features({"""labels""": ClassLabel} )
a_ : str = "image"
a_ : str = "labels"
def _lowerCAmelCase ( self , A ):
if self.label_column not in features:
raise ValueError(F"Column {self.label_column} is not present in features." )
if not isinstance(features[self.label_column] , A ):
raise ValueError(F"Column {self.label_column} is not a ClassLabel." )
_lowerCamelCase : Dict = copy.deepcopy(self )
_lowerCamelCase : Optional[int] = self.label_schema.copy()
_lowerCamelCase : int = features[self.label_column]
_lowerCamelCase : Tuple = label_schema
return task_template
@property
def _lowerCAmelCase ( self ):
return {
self.image_column: "image",
self.label_column: "labels",
}
| 349
|
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 6_50, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """pytorch""",
"""script""": """run_ddp.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 6_00, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf_dist.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 6_00, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
},
] )
class A_(unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self ):
if self.framework == "pytorch":
subprocess.run(
F"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding='utf-8' , check=A , )
assert hasattr(self , 'env' )
def _lowerCAmelCase ( self , A ):
_lowerCamelCase : List[str] = F"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
# distributed data settings
_lowerCamelCase : Any = {'smdistributed': {'dataparallel': {'enabled': True}}} if self.script != 'run_ddp.py' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=A , instance_count=A , instance_type=self.instance_type , debugger_hook_config=A , hyperparameters={**self.env.distributed_hyperparameters, 'model_name_or_path': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=A , py_version='py36' , )
def _lowerCAmelCase ( self , A ):
TrainingJobAnalytics(A ).export_csv(F"{self.env.test_path}/{job_name}_metrics.csv" )
@parameterized.expand([(2,)] )
def _lowerCAmelCase ( self , A ):
# create estimator
_lowerCamelCase : List[Any] = self.create_estimator(A )
# run training
estimator.fit()
# result dataframe
_lowerCamelCase : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
_lowerCamelCase : Dict = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
_lowerCamelCase : Dict = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_lowerCamelCase : Union[str, Any] = (
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"{estimator.latest_training_job.name}.json" , 'w' ) as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , A )
| 349
| 1
|
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
lowerCAmelCase_ : Any = logging.get_logger(__name__)
lowerCAmelCase_ : Dict = {'vocab_file': 'vocab.txt'}
lowerCAmelCase_ : List[str] = {
'vocab_file': {
'facebook/esm2_t6_8M_UR50D': 'https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt',
'facebook/esm2_t12_35M_UR50D': 'https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt',
},
}
lowerCAmelCase_ : List[str] = {
'facebook/esm2_t6_8M_UR50D': 1024,
'facebook/esm2_t12_35M_UR50D': 1024,
}
def UpperCAmelCase ( A : str ):
with open(SCREAMING_SNAKE_CASE__ , '''r''' ) as f:
SCREAMING_SNAKE_CASE : Dict = f.read().splitlines()
return [l.strip() for l in lines]
class lowerCamelCase_ ( SCREAMING_SNAKE_CASE_ ):
_lowerCAmelCase : Tuple = VOCAB_FILES_NAMES
_lowerCAmelCase : str = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase : str = ["""input_ids""", """attention_mask"""]
def __init__( self : str , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int]="<unk>" , lowerCAmelCase__ : List[Any]="<cls>" , lowerCAmelCase__ : Optional[int]="<pad>" , lowerCAmelCase__ : List[Any]="<mask>" , lowerCAmelCase__ : Tuple="<eos>" , **lowerCAmelCase__ : Tuple , ):
"""simple docstring"""
super().__init__(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = load_vocab_file(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : int = dict(enumerate(self.all_tokens ) )
SCREAMING_SNAKE_CASE : List[Any] = {tok: ind for ind, tok in enumerate(self.all_tokens )}
SCREAMING_SNAKE_CASE : List[Any] = unk_token
SCREAMING_SNAKE_CASE : int = cls_token
SCREAMING_SNAKE_CASE : Any = pad_token
SCREAMING_SNAKE_CASE : Dict = mask_token
SCREAMING_SNAKE_CASE : str = eos_token
SCREAMING_SNAKE_CASE : List[str] = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def __lowercase ( self : int , lowerCAmelCase__ : int ):
"""simple docstring"""
return self._id_to_token.get(lowerCAmelCase__ , self.unk_token )
def __lowercase ( self : Tuple , lowerCAmelCase__ : str ):
"""simple docstring"""
return self._token_to_id.get(lowerCAmelCase__ , self._token_to_id.get(self.unk_token ) )
def __lowercase ( self : Dict , lowerCAmelCase__ : List[Any] , **lowerCAmelCase__ : Optional[int] ):
"""simple docstring"""
return text.split()
def __lowercase ( self : Union[str, Any] , lowerCAmelCase__ : int=False ):
"""simple docstring"""
return len(self._id_to_token )
def __lowercase ( self : Any ):
"""simple docstring"""
return {token: i for i, token in enumerate(self.all_tokens )}
def __lowercase ( self : Tuple , lowerCAmelCase__ : str ):
"""simple docstring"""
return self._token_to_id.get(lowerCAmelCase__ , self._token_to_id.get(self.unk_token ) )
def __lowercase ( self : List[str] , lowerCAmelCase__ : int ):
"""simple docstring"""
return self._id_to_token.get(lowerCAmelCase__ , self.unk_token )
def __lowercase ( self : List[str] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = [self.cls_token_id]
SCREAMING_SNAKE_CASE : List[Any] = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def __lowercase ( self : List[str] , lowerCAmelCase__ : List , lowerCAmelCase__ : Optional[List] = None , lowerCAmelCase__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
SCREAMING_SNAKE_CASE : Dict = [1] + ([0] * len(lowerCAmelCase__ )) + [1]
if token_ids_a is not None:
mask += [0] * len(lowerCAmelCase__ ) + [1]
return mask
def __lowercase ( self : Optional[int] , lowerCAmelCase__ : int , lowerCAmelCase__ : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(lowerCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' )
with open(lowerCAmelCase__ , '''w''' ) as f:
f.write('''\n'''.join(self.all_tokens ) )
return (vocab_file,)
@property
def __lowercase ( self : Any ):
"""simple docstring"""
return self.get_vocab_size(with_added_tokens=lowerCAmelCase__ )
def __lowercase ( self : Optional[int] , lowerCAmelCase__ : Union[List[str], List[AddedToken]] , lowerCAmelCase__ : bool = False ):
"""simple docstring"""
return super()._add_tokens(lowerCAmelCase__ , special_tokens=lowerCAmelCase__ )
| 527
|
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 477
| 0
|
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class _SCREAMING_SNAKE_CASE :
def __init__( self : Tuple , UpperCamelCase : List[str] , )->Union[str, Any]:
__SCREAMING_SNAKE_CASE : int = parent
__SCREAMING_SNAKE_CASE : Any = 1_3
__SCREAMING_SNAKE_CASE : Any = 7
__SCREAMING_SNAKE_CASE : Optional[int] = True
__SCREAMING_SNAKE_CASE : Optional[int] = True
__SCREAMING_SNAKE_CASE : Tuple = False
__SCREAMING_SNAKE_CASE : Optional[Any] = True
__SCREAMING_SNAKE_CASE : Dict = 9_9
__SCREAMING_SNAKE_CASE : Union[str, Any] = 3_2
__SCREAMING_SNAKE_CASE : Union[str, Any] = 2
__SCREAMING_SNAKE_CASE : Union[str, Any] = 4
__SCREAMING_SNAKE_CASE : List[str] = 3_7
__SCREAMING_SNAKE_CASE : str = '''gelu'''
__SCREAMING_SNAKE_CASE : Dict = 0.1
__SCREAMING_SNAKE_CASE : List[Any] = 0.1
__SCREAMING_SNAKE_CASE : List[str] = 5_1_2
__SCREAMING_SNAKE_CASE : Optional[int] = 1_6
__SCREAMING_SNAKE_CASE : Optional[Any] = 2
__SCREAMING_SNAKE_CASE : List[str] = 0.0_2
__SCREAMING_SNAKE_CASE : Any = 3
__SCREAMING_SNAKE_CASE : Optional[Any] = 4
__SCREAMING_SNAKE_CASE : int = None
def __snake_case ( self : List[str] )->Dict:
__SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE : Any = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
__SCREAMING_SNAKE_CASE : Any = None
__SCREAMING_SNAKE_CASE : str = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
__SCREAMING_SNAKE_CASE : Dict = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __snake_case ( self : Any , UpperCamelCase : Dict , UpperCamelCase : List[str] , UpperCamelCase : Any , UpperCamelCase : List[str] , UpperCamelCase : Dict , UpperCamelCase : Optional[Any] )->Union[str, Any]:
__SCREAMING_SNAKE_CASE : int = TFDistilBertModel(config=UpperCamelCase )
__SCREAMING_SNAKE_CASE : int = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__SCREAMING_SNAKE_CASE : List[str] = model(UpperCamelCase )
__SCREAMING_SNAKE_CASE : str = [input_ids, input_mask]
__SCREAMING_SNAKE_CASE : Tuple = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __snake_case ( self : List[Any] , UpperCamelCase : Any , UpperCamelCase : int , UpperCamelCase : Dict , UpperCamelCase : List[str] , UpperCamelCase : List[str] , UpperCamelCase : Optional[Any] )->Tuple:
__SCREAMING_SNAKE_CASE : List[str] = TFDistilBertForMaskedLM(config=UpperCamelCase )
__SCREAMING_SNAKE_CASE : int = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__SCREAMING_SNAKE_CASE : Union[str, Any] = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __snake_case ( self : List[Any] , UpperCamelCase : int , UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] )->Any:
__SCREAMING_SNAKE_CASE : str = TFDistilBertForQuestionAnswering(config=UpperCamelCase )
__SCREAMING_SNAKE_CASE : int = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
}
__SCREAMING_SNAKE_CASE : List[str] = model(UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __snake_case ( self : str , UpperCamelCase : int , UpperCamelCase : List[str] , UpperCamelCase : List[Any] , UpperCamelCase : Tuple , UpperCamelCase : Optional[Any] , UpperCamelCase : Dict )->Optional[int]:
__SCREAMING_SNAKE_CASE : Dict = self.num_labels
__SCREAMING_SNAKE_CASE : Optional[Any] = TFDistilBertForSequenceClassification(UpperCamelCase )
__SCREAMING_SNAKE_CASE : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__SCREAMING_SNAKE_CASE : Union[str, Any] = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __snake_case ( self : int , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : int , UpperCamelCase : Dict )->List[str]:
__SCREAMING_SNAKE_CASE : List[Any] = self.num_choices
__SCREAMING_SNAKE_CASE : Optional[int] = TFDistilBertForMultipleChoice(UpperCamelCase )
__SCREAMING_SNAKE_CASE : Optional[Any] = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
__SCREAMING_SNAKE_CASE : List[Any] = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
__SCREAMING_SNAKE_CASE : Tuple = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
}
__SCREAMING_SNAKE_CASE : Tuple = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __snake_case ( self : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple , UpperCamelCase : Optional[Any] , UpperCamelCase : Dict , UpperCamelCase : List[Any] , UpperCamelCase : Union[str, Any] )->Dict:
__SCREAMING_SNAKE_CASE : Dict = self.num_labels
__SCREAMING_SNAKE_CASE : Tuple = TFDistilBertForTokenClassification(UpperCamelCase )
__SCREAMING_SNAKE_CASE : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__SCREAMING_SNAKE_CASE : str = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __snake_case ( self : List[str] )->Dict:
__SCREAMING_SNAKE_CASE : int = self.prepare_config_and_inputs()
(__SCREAMING_SNAKE_CASE) : Union[str, Any] = config_and_inputs
__SCREAMING_SNAKE_CASE : Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class _SCREAMING_SNAKE_CASE (__A , __A , unittest.TestCase ):
lowerCAmelCase = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
lowerCAmelCase = (
{
"""feature-extraction""": TFDistilBertModel,
"""fill-mask""": TFDistilBertForMaskedLM,
"""question-answering""": TFDistilBertForQuestionAnswering,
"""text-classification""": TFDistilBertForSequenceClassification,
"""token-classification""": TFDistilBertForTokenClassification,
"""zero-shot""": TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
def __snake_case ( self : Dict )->str:
__SCREAMING_SNAKE_CASE : str = TFDistilBertModelTester(self )
__SCREAMING_SNAKE_CASE : int = ConfigTester(self , config_class=UpperCamelCase , dim=3_7 )
def __snake_case ( self : Union[str, Any] )->Dict:
self.config_tester.run_common_tests()
def __snake_case ( self : Optional[Any] )->Tuple:
__SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*UpperCamelCase )
def __snake_case ( self : Tuple )->Any:
__SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*UpperCamelCase )
def __snake_case ( self : Optional[int] )->List[Any]:
__SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*UpperCamelCase )
def __snake_case ( self : Any )->str:
__SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*UpperCamelCase )
def __snake_case ( self : Optional[Any] )->List[str]:
__SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*UpperCamelCase )
def __snake_case ( self : str )->Union[str, Any]:
__SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*UpperCamelCase )
@slow
def __snake_case ( self : List[Any] )->Dict:
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = TFDistilBertModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
@require_tf
class _SCREAMING_SNAKE_CASE (unittest.TestCase ):
@slow
def __snake_case ( self : List[str] )->List[Any]:
__SCREAMING_SNAKE_CASE : Optional[Any] = TFDistilBertModel.from_pretrained("distilbert-base-uncased" )
__SCREAMING_SNAKE_CASE : Tuple = tf.constant([[0, 1, 2, 3, 4, 5]] )
__SCREAMING_SNAKE_CASE : List[Any] = model(UpperCamelCase )[0]
__SCREAMING_SNAKE_CASE : str = [1, 6, 7_6_8]
self.assertEqual(output.shape , UpperCamelCase )
__SCREAMING_SNAKE_CASE : Optional[int] = tf.constant(
[
[
[0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9],
[0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4],
[0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCamelCase , atol=1E-4 )
| 708
|
from collections.abc import Generator
from math import sin
def _lowerCAmelCase ( __lowerCamelCase : bytes ):
"""simple docstring"""
if len(__lowerCamelCase ) != 32:
raise ValueError("Input must be of length 32" )
__SCREAMING_SNAKE_CASE : Union[str, Any] = b""
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def _lowerCAmelCase ( __lowerCamelCase : int ):
"""simple docstring"""
if i < 0:
raise ValueError("Input must be non-negative" )
__SCREAMING_SNAKE_CASE : Any = format(__lowerCamelCase , "08x" )[-8:]
__SCREAMING_SNAKE_CASE : Optional[Any] = b""
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" )
return little_endian_hex
def _lowerCAmelCase ( __lowerCamelCase : bytes ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = b""
for char in message:
bit_string += format(__lowerCamelCase , "08b" ).encode("utf-8" )
__SCREAMING_SNAKE_CASE : List[str] = format(len(__lowerCamelCase ) , "064b" ).encode("utf-8" )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(__lowerCamelCase ) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def _lowerCAmelCase ( __lowerCamelCase : bytes ):
"""simple docstring"""
if len(__lowerCamelCase ) % 512 != 0:
raise ValueError("Input must have length that's a multiple of 512" )
for pos in range(0 , len(__lowerCamelCase ) , 512 ):
__SCREAMING_SNAKE_CASE : int = bit_string[pos : pos + 512]
__SCREAMING_SNAKE_CASE : Tuple = []
for i in range(0 , 512 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def _lowerCAmelCase ( __lowerCamelCase : int ):
"""simple docstring"""
if i < 0:
raise ValueError("Input must be non-negative" )
__SCREAMING_SNAKE_CASE : Union[str, Any] = format(__lowerCamelCase , "032b" )
__SCREAMING_SNAKE_CASE : Union[str, Any] = ""
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(__lowerCamelCase , 2 )
def _lowerCAmelCase ( __lowerCamelCase : int , __lowerCamelCase : int ):
"""simple docstring"""
return (a + b) % 2**32
def _lowerCAmelCase ( __lowerCamelCase : int , __lowerCamelCase : int ):
"""simple docstring"""
if i < 0:
raise ValueError("Input must be non-negative" )
if shift < 0:
raise ValueError("Shift must be non-negative" )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def _lowerCAmelCase ( __lowerCamelCase : bytes ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = preprocess(__lowerCamelCase )
__SCREAMING_SNAKE_CASE : List[Any] = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
__SCREAMING_SNAKE_CASE : Tuple = 0X67452301
__SCREAMING_SNAKE_CASE : Optional[Any] = 0Xefcdab89
__SCREAMING_SNAKE_CASE : Optional[int] = 0X98badcfe
__SCREAMING_SNAKE_CASE : Optional[Any] = 0X10325476
__SCREAMING_SNAKE_CASE : List[Any] = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(__lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Any = aa
__SCREAMING_SNAKE_CASE : Union[str, Any] = ba
__SCREAMING_SNAKE_CASE : str = ca
__SCREAMING_SNAKE_CASE : Dict = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
__SCREAMING_SNAKE_CASE : Union[str, Any] = d ^ (b & (c ^ d))
__SCREAMING_SNAKE_CASE : Optional[Any] = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
__SCREAMING_SNAKE_CASE : int = c ^ (d & (b ^ c))
__SCREAMING_SNAKE_CASE : int = (5 * i + 1) % 16
elif i <= 47:
__SCREAMING_SNAKE_CASE : List[str] = b ^ c ^ d
__SCREAMING_SNAKE_CASE : Union[str, Any] = (3 * i + 5) % 16
else:
__SCREAMING_SNAKE_CASE : Any = c ^ (b | not_aa(__lowerCamelCase ))
__SCREAMING_SNAKE_CASE : str = (7 * i) % 16
__SCREAMING_SNAKE_CASE : List[str] = (f + a + added_consts[i] + block_words[g]) % 2**32
__SCREAMING_SNAKE_CASE : Dict = d
__SCREAMING_SNAKE_CASE : str = c
__SCREAMING_SNAKE_CASE : Tuple = b
__SCREAMING_SNAKE_CASE : Optional[int] = sum_aa(__lowerCamelCase , left_rotate_aa(__lowerCamelCase , shift_amounts[i] ) )
# Add hashed chunk to running total
__SCREAMING_SNAKE_CASE : Dict = sum_aa(__lowerCamelCase , __lowerCamelCase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = sum_aa(__lowerCamelCase , __lowerCamelCase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = sum_aa(__lowerCamelCase , __lowerCamelCase )
__SCREAMING_SNAKE_CASE : str = sum_aa(__lowerCamelCase , __lowerCamelCase )
__SCREAMING_SNAKE_CASE : List[str] = reformat_hex(__lowerCamelCase ) + reformat_hex(__lowerCamelCase ) + reformat_hex(__lowerCamelCase ) + reformat_hex(__lowerCamelCase )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 447
| 0
|
'''simple docstring'''
import baseaa
def _UpperCamelCase ( UpperCamelCase__ ):
return baseaa.baaencode(string.encode("""utf-8""" ) )
def _UpperCamelCase ( UpperCamelCase__ ):
return baseaa.baadecode(UpperCamelCase__ ).decode("""utf-8""" )
if __name__ == "__main__":
__A ='Hello World!'
__A =baseaa_encode(test)
print(encoded)
__A =baseaa_decode(encoded)
print(decoded)
| 407
|
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _snake_case ( a__ ):
@staticmethod
@abstractmethod
def snake_case__ ( _lowerCamelCase):
raise NotImplementedError()
@abstractmethod
def snake_case__ ( self):
raise NotImplementedError()
| 407
| 1
|
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def __UpperCAmelCase ( a_: str, a_: Union[str, Any]=None ) -> int:
_UpperCAmelCase : Dict = None
if token is not None:
_UpperCAmelCase : Optional[int] = {"Accept": "application/vnd.github+json", "Authorization": f"""Bearer {token}"""}
_UpperCAmelCase : str = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
_UpperCAmelCase : Optional[Any] = requests.get(a_, headers=a_ ).json()
_UpperCAmelCase : Tuple = {}
try:
job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
_UpperCAmelCase : str = math.ceil((result["total_count"] - 100) / 100 )
for i in range(a_ ):
_UpperCAmelCase : int = requests.get(url + f"""&page={i + 2}""", headers=a_ ).json()
job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
return job_links
except Exception:
print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def __UpperCAmelCase ( a_: Optional[Any], a_: Optional[int]=None ) -> Union[str, Any]:
_UpperCAmelCase : Any = None
if token is not None:
_UpperCAmelCase : Any = {"Accept": "application/vnd.github+json", "Authorization": f"""Bearer {token}"""}
_UpperCAmelCase : Dict = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"""
_UpperCAmelCase : Dict = requests.get(a_, headers=a_ ).json()
_UpperCAmelCase : List[Any] = {}
try:
artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
_UpperCAmelCase : List[Any] = math.ceil((result["total_count"] - 100) / 100 )
for i in range(a_ ):
_UpperCAmelCase : Any = requests.get(url + f"""&page={i + 2}""", headers=a_ ).json()
artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
return artifacts
except Exception:
print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def __UpperCAmelCase ( a_: Dict, a_: Union[str, Any], a_: Union[str, Any], a_: Tuple ) -> Dict:
_UpperCAmelCase : int = None
if token is not None:
_UpperCAmelCase : str = {"Accept": "application/vnd.github+json", "Authorization": f"""Bearer {token}"""}
_UpperCAmelCase : Optional[Any] = requests.get(a_, headers=a_, allow_redirects=a_ )
_UpperCAmelCase : Optional[Any] = result.headers["Location"]
_UpperCAmelCase : Union[str, Any] = requests.get(a_, allow_redirects=a_ )
_UpperCAmelCase : Dict = os.path.join(a_, f"""{artifact_name}.zip""" )
with open(a_, "wb" ) as fp:
fp.write(response.content )
def __UpperCAmelCase ( a_: List[Any], a_: List[str]=None ) -> List[Any]:
_UpperCAmelCase : List[Any] = []
_UpperCAmelCase : Dict = []
_UpperCAmelCase : Any = None
with zipfile.ZipFile(a_ ) as z:
for filename in z.namelist():
if not os.path.isdir(a_ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(a_ ) as f:
for line in f:
_UpperCAmelCase : Tuple = line.decode("UTF-8" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
_UpperCAmelCase : Any = line[: line.index(": " )]
_UpperCAmelCase : List[str] = line[line.index(": " ) + len(": " ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("FAILED " ):
# `test` is the test method that failed
_UpperCAmelCase : Union[str, Any] = line[len("FAILED " ) :]
failed_tests.append(a_ )
elif filename == "job_name.txt":
_UpperCAmelCase : Optional[int] = line
if len(a_ ) != len(a_ ):
raise ValueError(
f"""`errors` and `failed_tests` should have the same number of elements. Got {len(a_ )} for `errors` """
f"""and {len(a_ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"""
" problem." )
_UpperCAmelCase : int = None
if job_name and job_links:
_UpperCAmelCase : Dict = job_links.get(a_, a_ )
# A list with elements of the form (line of error, error, failed test)
_UpperCAmelCase : Optional[Any] = [x + [y] + [job_link] for x, y in zip(a_, a_ )]
return result
def __UpperCAmelCase ( a_: Optional[int], a_: Optional[Any]=None ) -> List[str]:
_UpperCAmelCase : str = []
_UpperCAmelCase : str = [os.path.join(a_, a_ ) for p in os.listdir(a_ ) if p.endswith(".zip" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(a_, job_links=a_ ) )
return errors
def __UpperCAmelCase ( a_: str, a_: Tuple=None ) -> Union[str, Any]:
_UpperCAmelCase : Optional[int] = Counter()
counter.update([x[1] for x in logs] )
_UpperCAmelCase : Union[str, Any] = counter.most_common()
_UpperCAmelCase : str = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
_UpperCAmelCase : Any = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
_UpperCAmelCase : Optional[int] = dict(sorted(r.items(), key=lambda a_ : item[1]["count"], reverse=a_ ) )
return r
def __UpperCAmelCase ( a_: List[Any] ) -> str:
_UpperCAmelCase : Optional[int] = test.split("::" )[0]
if test.startswith("tests/models/" ):
_UpperCAmelCase : Dict = test.split("/" )[2]
else:
_UpperCAmelCase : List[Any] = None
return test
def __UpperCAmelCase ( a_: Dict, a_: List[Any]=None ) -> Union[str, Any]:
_UpperCAmelCase : int = [(x[0], x[1], get_model(x[2] )) for x in logs]
_UpperCAmelCase : str = [x for x in logs if x[2] is not None]
_UpperCAmelCase : Union[str, Any] = {x[2] for x in logs}
_UpperCAmelCase : Optional[int] = {}
for test in tests:
_UpperCAmelCase : Union[str, Any] = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
_UpperCAmelCase : int = counter.most_common()
_UpperCAmelCase : List[Any] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
_UpperCAmelCase : Union[str, Any] = sum(error_counts.values() )
if n_errors > 0:
_UpperCAmelCase : Optional[Any] = {"count": n_errors, "errors": error_counts}
_UpperCAmelCase : Tuple = dict(sorted(r.items(), key=lambda a_ : item[1]["count"], reverse=a_ ) )
return r
def __UpperCAmelCase ( a_: Optional[int] ) -> Any:
_UpperCAmelCase : List[Any] = "| no. | error | status |"
_UpperCAmelCase : str = "|-:|:-|:-|"
_UpperCAmelCase : Dict = [header, sep]
for error in reduced_by_error:
_UpperCAmelCase : Dict = reduced_by_error[error]["count"]
_UpperCAmelCase : Tuple = f"""| {count} | {error[:100]} | |"""
lines.append(a_ )
return "\n".join(a_ )
def __UpperCAmelCase ( a_: List[str] ) -> str:
_UpperCAmelCase : List[Any] = "| model | no. of errors | major error | count |"
_UpperCAmelCase : Optional[int] = "|-:|-:|-:|-:|"
_UpperCAmelCase : Optional[int] = [header, sep]
for model in reduced_by_model:
_UpperCAmelCase : Any = reduced_by_model[model]["count"]
_UpperCAmelCase : List[Any] = list(reduced_by_model[model]["errors"].items() )[0]
_UpperCAmelCase : str = f"""| {model} | {count} | {error[:60]} | {_count} |"""
lines.append(a_ )
return "\n".join(a_ )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
__a = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
__a = get_job_links(args.workflow_run_id, token=args.token)
__a = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
__a = k.find(' / ')
__a = k[index + len(' / ') :]
__a = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
__a = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
__a = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
__a = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
__a = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
__a = reduce_by_error(errors)
__a = reduce_by_model(errors)
__a = make_github_table(reduced_by_error)
__a = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
| 707
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def __UpperCAmelCase ( ):
_UpperCAmelCase : int = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=a_ )
_UpperCAmelCase : Union[str, Any] = parser.add_subparsers(help="accelerate command helpers" )
# Register commands
get_config_parser(subparsers=a_ )
env_command_parser(subparsers=a_ )
launch_command_parser(subparsers=a_ )
tpu_command_parser(subparsers=a_ )
test_command_parser(subparsers=a_ )
# Let's go
_UpperCAmelCase : List[Any] = parser.parse_args()
if not hasattr(a_, "func" ):
parser.print_help()
exit(1 )
# Run
args.func(a_ )
if __name__ == "__main__":
main()
| 257
| 0
|
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __magic_name__ ( snake_case ):
UpperCamelCase_ :UNetaDModel
UpperCamelCase_ :KarrasVeScheduler
def __init__( self , _lowercase , _lowercase )-> Optional[int]:
super().__init__()
self.register_modules(unet=_lowercase , scheduler=_lowercase )
@torch.no_grad()
def __call__( self , _lowercase = 1 , _lowercase = 50 , _lowercase = None , _lowercase = "pil" , _lowercase = True , **_lowercase , )-> Union[Tuple, ImagePipelineOutput]:
UpperCamelCase_ = self.unet.config.sample_size
UpperCamelCase_ = (batch_size, 3, img_size, img_size)
UpperCamelCase_ = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
UpperCamelCase_ = randn_tensor(_lowercase , generator=_lowercase , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(_lowercase )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
UpperCamelCase_ = self.scheduler.schedule[t]
UpperCamelCase_ = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
UpperCamelCase_ , UpperCamelCase_ = self.scheduler.add_noise_to_input(_lowercase , _lowercase , generator=_lowercase )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
UpperCamelCase_ = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
UpperCamelCase_ = self.scheduler.step(_lowercase , _lowercase , _lowercase , _lowercase )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
UpperCamelCase_ = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
UpperCamelCase_ = self.scheduler.step_correct(
_lowercase , _lowercase , _lowercase , _lowercase , step_output.prev_sample , step_output["derivative"] , )
UpperCamelCase_ = step_output.prev_sample
UpperCamelCase_ = (sample / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase_ = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase_ = self.numpy_to_pil(_lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowercase )
| 628
|
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ = 4_0_0_0_0_0_0 )-> int:
"""simple docstring"""
UpperCamelCase_ = [0, 1]
UpperCamelCase_ = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
UpperCamelCase_ = 0
for j in range(len(SCREAMING_SNAKE_CASE_ ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(F'''{solution() = }''')
| 628
| 1
|
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def lowercase_ ( _A : Optional[int] , _A : List[Any] = True , _A : List[str] = math.inf , _A : List[str] = -math.inf , _A : str = math.inf , _A : Optional[Any] = -math.inf , _A : Optional[int] = False , _A : List[Any] = 100 , _A : Tuple = 0.01 , _A : Dict = 1 , ):
"""simple docstring"""
lowerCamelCase__ : int = False
lowerCamelCase__ : Tuple = search_prob
lowerCamelCase__ : str = start_temperate
lowerCamelCase__ : Any = []
lowerCamelCase__ : str = 0
lowerCamelCase__ : str = None
while not search_end:
lowerCamelCase__ : Any = current_state.score()
if best_state is None or current_score > best_state.score():
lowerCamelCase__ : List[Any] = current_state
scores.append(_lowerCAmelCase )
iterations += 1
lowerCamelCase__ : List[str] = None
lowerCamelCase__ : Dict = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
lowerCamelCase__ : List[Any] = random.randint(0 , len(_lowerCAmelCase ) - 1 ) # picking a random neighbor
lowerCamelCase__ : Dict = neighbors.pop(_lowerCAmelCase )
lowerCamelCase__ : Any = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
lowerCamelCase__ : int = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
lowerCamelCase__ : List[str] = picked_neighbor
else:
lowerCamelCase__ : List[str] = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
lowerCamelCase__ : Optional[Any] = picked_neighbor
lowerCamelCase__ : Any = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
lowerCamelCase__ : Dict = True
else:
lowerCamelCase__ : List[str] = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(_lowerCAmelCase ) , _lowerCAmelCase )
plt.xlabel("Iterations" )
plt.ylabel("Function values" )
plt.show()
return best_state
if __name__ == "__main__":
def lowercase_ ( _A : List[str] , _A : Dict ):
"""simple docstring"""
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
A : Optional[int] = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
A : Union[str, Any] = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
f'and 50 > y > - 5 found via hill climbing: {local_min.score()}'
)
# starting the problem with initial coordinates (12, 47)
A : List[Any] = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
A : List[Any] = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
f'and 50 > y > - 5 found via hill climbing: {local_min.score()}'
)
def lowercase_ ( _A : List[str] , _A : Optional[Any] ):
"""simple docstring"""
return (3 * x**2) - (6 * y)
A : int = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
A : int = simulated_annealing(prob, find_max=False, visualization=True)
print(
"The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
f'{local_min.score()}'
)
A : Tuple = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
A : int = simulated_annealing(prob, find_max=True, visualization=True)
print(
"The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
f'{local_min.score()}'
)
| 715
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
A : List[Any] = logging.get_logger(__name__)
A : Any = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class _lowercase ( lowercase__):
"""simple docstring"""
A__ = "blenderbot-small"
A__ = ["past_key_values"]
A__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Dict , __lowerCamelCase : List[str]=50265 , __lowerCamelCase : str=512 , __lowerCamelCase : Tuple=8 , __lowerCamelCase : str=2048 , __lowerCamelCase : str=16 , __lowerCamelCase : List[Any]=8 , __lowerCamelCase : Any=2048 , __lowerCamelCase : List[str]=16 , __lowerCamelCase : Dict=0.0 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Tuple="gelu" , __lowerCamelCase : Tuple=512 , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : int=0.0 , __lowerCamelCase : Union[str, Any]=0.0 , __lowerCamelCase : Any=0.0_2 , __lowerCamelCase : str=1 , __lowerCamelCase : Dict=False , __lowerCamelCase : int=0 , __lowerCamelCase : Optional[Any]=1 , __lowerCamelCase : str=2 , __lowerCamelCase : Any=2 , **__lowerCamelCase : int , ):
'''simple docstring'''
lowerCamelCase__ : str = vocab_size
lowerCamelCase__ : Union[str, Any] = max_position_embeddings
lowerCamelCase__ : Union[str, Any] = d_model
lowerCamelCase__ : Optional[int] = encoder_ffn_dim
lowerCamelCase__ : Dict = encoder_layers
lowerCamelCase__ : Any = encoder_attention_heads
lowerCamelCase__ : Union[str, Any] = decoder_ffn_dim
lowerCamelCase__ : str = decoder_layers
lowerCamelCase__ : Optional[Any] = decoder_attention_heads
lowerCamelCase__ : List[str] = dropout
lowerCamelCase__ : List[Any] = attention_dropout
lowerCamelCase__ : Dict = activation_dropout
lowerCamelCase__ : Optional[Any] = activation_function
lowerCamelCase__ : Dict = init_std
lowerCamelCase__ : List[str] = encoder_layerdrop
lowerCamelCase__ : Dict = decoder_layerdrop
lowerCamelCase__ : int = use_cache
lowerCamelCase__ : List[Any] = encoder_layers
lowerCamelCase__ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , is_encoder_decoder=__lowerCamelCase , decoder_start_token_id=__lowerCamelCase , forced_eos_token_id=__lowerCamelCase , **__lowerCamelCase , )
class _lowercase ( lowercase__):
"""simple docstring"""
@property
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : int = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
lowerCamelCase__ : Union[str, Any] = {0: "batch"}
lowerCamelCase__ : int = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
lowerCamelCase__ : Tuple = {0: "batch", 1: "decoder_sequence"}
lowerCamelCase__ : str = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__lowerCamelCase , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
lowerCamelCase__ : Tuple = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
lowerCamelCase__ , lowerCamelCase__ : Tuple = self.num_layers
for i in range(__lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = {0: "batch", 2: "past_sequence + sequence"}
lowerCamelCase__ : Optional[int] = {0: "batch", 2: "past_sequence + sequence"}
else:
lowerCamelCase__ : Any = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : Union[str, Any] = super().outputs
else:
lowerCamelCase__ : int = super(__lowerCamelCase , self ).outputs
if self.use_past:
lowerCamelCase__ , lowerCamelCase__ : Tuple = self.num_layers
for i in range(__lowerCamelCase ):
lowerCamelCase__ : Tuple = {0: "batch", 2: "past_sequence + sequence"}
lowerCamelCase__ : Any = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def lowerCAmelCase ( self : int , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Generate decoder inputs
lowerCamelCase__ : List[str] = seq_length if not self.use_past else 1
lowerCamelCase__ : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ : Optional[Any] = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
lowerCamelCase__ : Optional[Any] = dict(**__lowerCamelCase , **__lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
lowerCamelCase__ , lowerCamelCase__ : Tuple = common_inputs["input_ids"].shape
lowerCamelCase__ : int = common_inputs["decoder_input_ids"].shape[1]
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.num_attention_heads
lowerCamelCase__ : str = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCamelCase__ : Optional[int] = decoder_seq_length + 3
lowerCamelCase__ : Dict = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
lowerCamelCase__ : List[Any] = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase )] , dim=1 )
lowerCamelCase__ : Optional[Any] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
lowerCamelCase__ , lowerCamelCase__ : str = self.num_layers
lowerCamelCase__ : Union[str, Any] = min(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = max(__lowerCamelCase , __lowerCamelCase ) - min_num_layers
lowerCamelCase__ : str = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(__lowerCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(__lowerCamelCase ),
torch.zeros(__lowerCamelCase ),
torch.zeros(__lowerCamelCase ),
torch.zeros(__lowerCamelCase ),
) )
# TODO: test this.
lowerCamelCase__ : Optional[int] = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(__lowerCamelCase , __lowerCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) )
return common_inputs
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ):
'''simple docstring'''
lowerCamelCase__ : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
lowerCamelCase__ , lowerCamelCase__ : int = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
lowerCamelCase__ : str = seqlen + 2
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.num_layers
lowerCamelCase__ , lowerCamelCase__ : int = self.num_attention_heads
lowerCamelCase__ : Tuple = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCamelCase__ : Union[str, Any] = common_inputs["attention_mask"].dtype
lowerCamelCase__ : List[str] = torch.cat(
[common_inputs["attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase , dtype=__lowerCamelCase )] , dim=1 )
lowerCamelCase__ : Tuple = [
(torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) for _ in range(__lowerCamelCase )
]
return common_inputs
def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ):
'''simple docstring'''
lowerCamelCase__ : str = compute_effective_axis_dimension(
__lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowerCamelCase__ : List[str] = tokenizer.num_special_tokens_to_add(__lowerCamelCase )
lowerCamelCase__ : Dict = compute_effective_axis_dimension(
__lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCamelCase )
# Generate dummy inputs according to compute batch and sequence
lowerCamelCase__ : Optional[int] = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
lowerCamelCase__ : Optional[Any] = dict(tokenizer(__lowerCamelCase , return_tensors=__lowerCamelCase ) )
return common_inputs
def lowerCAmelCase ( self : Any , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : Optional[int] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase )
elif self.task == "causal-lm":
lowerCamelCase__ : Any = self._generate_dummy_inputs_for_causal_lm(
__lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase )
else:
lowerCamelCase__ : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase )
return common_inputs
def lowerCAmelCase ( self : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : Dict = super()._flatten_past_key_values_(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
else:
lowerCamelCase__ : int = super(__lowerCamelCase , self )._flatten_past_key_values_(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
| 5
| 0
|
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class snake_case__ ( UpperCamelCase_ ):
_lowerCAmelCase =42
_lowerCAmelCase =42
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 170
|
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowerCamelCase : Union[str, Any] = [
'python',
'tqdm',
'regex',
'requests',
'packaging',
'filelock',
'numpy',
'tokenizers',
'huggingface-hub',
'safetensors',
'accelerate',
'pyyaml',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def lowercase__( A , A=None ):
require_version(deps[pkg] , A )
| 170
| 1
|
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __A :
def __init__( self : Optional[int] , lowerCamelCase : Optional[Any] , lowerCamelCase : str=13 , lowerCamelCase : Optional[int]=30 , lowerCamelCase : Union[str, Any]=2 , lowerCamelCase : int=3 , lowerCamelCase : Optional[Any]=True , lowerCamelCase : Any=True , lowerCamelCase : Tuple=32 , lowerCamelCase : int=5 , lowerCamelCase : List[Any]=4 , lowerCamelCase : Any=37 , lowerCamelCase : List[str]="gelu" , lowerCamelCase : Optional[Any]=0.1 , lowerCamelCase : Union[str, Any]=0.1 , lowerCamelCase : List[str]=10 , lowerCamelCase : Tuple=0.02 , lowerCamelCase : int=3 , lowerCamelCase : int=0.6 , lowerCamelCase : Optional[Any]=None , ):
"""simple docstring"""
__A : Dict = parent
__A : Union[str, Any] = batch_size
__A : str = image_size
__A : Union[str, Any] = patch_size
__A : Tuple = num_channels
__A : List[str] = is_training
__A : Any = use_labels
__A : int = hidden_size
__A : Dict = num_hidden_layers
__A : Any = num_attention_heads
__A : Optional[int] = intermediate_size
__A : Optional[Any] = hidden_act
__A : Union[str, Any] = hidden_dropout_prob
__A : Tuple = attention_probs_dropout_prob
__A : Optional[Any] = type_sequence_label_size
__A : Dict = initializer_range
__A : Optional[Any] = mask_ratio
__A : List[str] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
__A : int = (image_size // patch_size) ** 2
__A : List[Any] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def lowercase_( self : int ):
"""simple docstring"""
__A : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__A : Tuple = None
if self.use_labels:
__A : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A : Optional[Any] = self.get_config()
return config, pixel_values, labels
def lowercase_( self : Optional[Any] ):
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowercase_( self : Optional[int] , lowerCamelCase : int , lowerCamelCase : Optional[int] , lowerCamelCase : str ):
"""simple docstring"""
__A : Dict = ViTMAEModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__A : Optional[int] = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_( self : Optional[Any] , lowerCamelCase : Dict , lowerCamelCase : List[str] , lowerCamelCase : Tuple ):
"""simple docstring"""
__A : Dict = ViTMAEForPreTraining(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__A : Any = model(lowerCamelCase )
__A : Any = (self.image_size // self.patch_size) ** 2
__A : List[Any] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
__A : str = 1
__A : List[Any] = ViTMAEForPreTraining(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__A : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__A : Dict = model(lowerCamelCase )
__A : Any = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowercase_( self : Any ):
"""simple docstring"""
__A : Dict = self.prepare_config_and_inputs()
__A , __A , __A : Dict = config_and_inputs
__A : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __A ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowerCamelCase =(ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
lowerCamelCase ={'''feature-extraction''': ViTMAEModel} if is_torch_available() else {}
lowerCamelCase =False
lowerCamelCase =False
lowerCamelCase =False
lowerCamelCase =False
def lowercase_( self : Union[str, Any] ):
"""simple docstring"""
__A : List[str] = ViTMAEModelTester(self )
__A : Union[str, Any] = ConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase , hidden_size=37 )
def lowercase_( self : Tuple ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def lowercase_( self : str ):
"""simple docstring"""
pass
def lowercase_( self : Dict ):
"""simple docstring"""
__A , __A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : Optional[int] = model_class(lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__A : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase , nn.Linear ) )
def lowercase_( self : List[Any] ):
"""simple docstring"""
__A , __A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : Optional[int] = model_class(lowerCamelCase )
__A : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A : Dict = [*signature.parameters.keys()]
__A : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCamelCase )
def lowercase_( self : List[Any] ):
"""simple docstring"""
__A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def lowercase_( self : int ):
"""simple docstring"""
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCamelCase )
def lowercase_( self : Any , lowerCamelCase : str , lowerCamelCase : Any , lowerCamelCase : Any ):
"""simple docstring"""
np.random.seed(2 )
__A : Any = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
__A : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
__A : Optional[Any] = torch.from_numpy(lowerCamelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
__A : int = pt_noise
super().check_pt_tf_models(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def lowercase_( self : str ):
"""simple docstring"""
__A , __A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : Optional[Any] = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
__A : Optional[Any] = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
__A : Optional[int] = outputs[0].cpu().numpy()
__A : Dict = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase )
__A : Dict = model_class.from_pretrained(lowerCamelCase )
model.to(lowerCamelCase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
__A : Optional[int] = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
# Make sure we don't have nans
__A : int = after_outputs[0].cpu().numpy()
__A : Any = 0
__A : Union[str, Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase , 1e-5 )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def lowercase_( self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def lowercase_( self : str ):
"""simple docstring"""
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def lowercase_( self : List[str] ):
"""simple docstring"""
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def lowercase_( self : Optional[Any] ):
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowercase_( self : Optional[Any] ):
"""simple docstring"""
pass
@slow
def lowercase_( self : Union[str, Any] ):
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A : Dict = ViTMAEModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def A_ ( ) -> Optional[Any]:
"""simple docstring"""
__A : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __A ( unittest.TestCase ):
@cached_property
def lowercase_( self : List[str] ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def lowercase_( self : Any ):
"""simple docstring"""
np.random.seed(2 )
__A : Optional[Any] = ViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ).to(lowerCamelCase )
__A : int = self.default_image_processor
__A : str = prepare_img()
__A : List[str] = image_processor(images=lowerCamelCase , return_tensors="""pt""" ).to(lowerCamelCase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
__A : Union[str, Any] = ViTMAEConfig()
__A : Dict = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
__A : str = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
__A : Dict = model(**lowerCamelCase , noise=torch.from_numpy(lowerCamelCase ).to(device=lowerCamelCase ) )
# verify the logits
__A : Optional[int] = torch.Size((1, 1_96, 7_68) )
self.assertEqual(outputs.logits.shape , lowerCamelCase )
__A : List[str] = torch.tensor(
[[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(lowerCamelCase ) , atol=1e-4 ) )
| 499
|
'''simple docstring'''
def A_ ( __SCREAMING_SNAKE_CASE : int ) -> int:
"""simple docstring"""
__A : Optional[int] = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def A_ ( __SCREAMING_SNAKE_CASE : int ) -> int:
"""simple docstring"""
__A : List[str] = 0
while number > 0:
__A : Union[str, Any] = number % 10
sum_of_digits += last_digit
__A : int = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def A_ ( __SCREAMING_SNAKE_CASE : int = 100 ) -> int:
"""simple docstring"""
__A : Union[str, Any] = factorial(__SCREAMING_SNAKE_CASE )
__A : Any = split_and_add(__SCREAMING_SNAKE_CASE )
return result
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
| 499
| 1
|
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None ):
"""simple docstring"""
if version.parse(hfh.__version__ ).release < version.parse("0.11.0" ).release:
# old versions of hfh don't url-encode the file path
SCREAMING_SNAKE_CASE_ : Union[str, Any] = quote(lowerCAmelCase )
return hfh.hf_hub_url(lowerCAmelCase , lowerCAmelCase , repo_type="dataset" , revision=lowerCAmelCase )
| 216
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
__lowerCamelCase : int = {'''tokenization_herbert''': ['''HerbertTokenizer''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Union[str, Any] = ['''HerbertTokenizerFast''']
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
__lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 216
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class lowercase__ ( __lowerCamelCase ):
'''simple docstring'''
a : Optional[int] = "levit"
def __init__( self, __magic_name__=224, __magic_name__=3, __magic_name__=3, __magic_name__=2, __magic_name__=1, __magic_name__=16, __magic_name__=[128, 256, 384], __magic_name__=[4, 8, 12], __magic_name__=[4, 4, 4], __magic_name__=[16, 16, 16], __magic_name__=0, __magic_name__=[2, 2, 2], __magic_name__=[2, 2, 2], __magic_name__=0.02, **__magic_name__, ) -> Dict:
"""simple docstring"""
super().__init__(**__magic_name__ )
UpperCamelCase__ : str = image_size
UpperCamelCase__ : List[str] = num_channels
UpperCamelCase__ : Union[str, Any] = kernel_size
UpperCamelCase__ : str = stride
UpperCamelCase__ : str = padding
UpperCamelCase__ : List[str] = hidden_sizes
UpperCamelCase__ : Union[str, Any] = num_attention_heads
UpperCamelCase__ : str = depths
UpperCamelCase__ : List[str] = key_dim
UpperCamelCase__ : Optional[Any] = drop_path_rate
UpperCamelCase__ : List[str] = patch_size
UpperCamelCase__ : List[str] = attention_ratio
UpperCamelCase__ : Dict = mlp_ratio
UpperCamelCase__ : Optional[int] = initializer_range
UpperCamelCase__ : Dict = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class lowercase__ ( __lowerCamelCase ):
'''simple docstring'''
a : List[Any] = version.parse("1.11" )
@property
def UpperCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def UpperCamelCase__ ( self ) -> float:
"""simple docstring"""
return 1E-4
| 369
|
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase__ ( __lowerCamelCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__magic_name__, '''embed_dim''' ) )
self.parent.assertTrue(hasattr(__magic_name__, '''num_heads''' ) )
class lowercase__ :
'''simple docstring'''
def __init__( self, __magic_name__, __magic_name__=13, __magic_name__=64, __magic_name__=3, __magic_name__=[16, 48, 96], __magic_name__=[1, 3, 6], __magic_name__=[1, 2, 10], __magic_name__=[7, 3, 3], __magic_name__=[4, 2, 2], __magic_name__=[2, 1, 1], __magic_name__=[2, 2, 2], __magic_name__=[False, False, True], __magic_name__=[0.0, 0.0, 0.0], __magic_name__=0.02, __magic_name__=1E-12, __magic_name__=True, __magic_name__=True, __magic_name__=2, ) -> int:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = parent
UpperCamelCase__ : Optional[Any] = batch_size
UpperCamelCase__ : Dict = image_size
UpperCamelCase__ : Optional[Any] = patch_sizes
UpperCamelCase__ : List[str] = patch_stride
UpperCamelCase__ : Tuple = patch_padding
UpperCamelCase__ : Dict = is_training
UpperCamelCase__ : Optional[int] = use_labels
UpperCamelCase__ : Union[str, Any] = num_labels
UpperCamelCase__ : List[Any] = num_channels
UpperCamelCase__ : Optional[Any] = embed_dim
UpperCamelCase__ : List[str] = num_heads
UpperCamelCase__ : Any = stride_kv
UpperCamelCase__ : Any = depth
UpperCamelCase__ : Tuple = cls_token
UpperCamelCase__ : List[Any] = attention_drop_rate
UpperCamelCase__ : Any = initializer_range
UpperCamelCase__ : str = layer_norm_eps
def UpperCamelCase__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ : int = None
if self.use_labels:
UpperCamelCase__ : Optional[int] = ids_tensor([self.batch_size], self.num_labels )
UpperCamelCase__ : str = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self ) -> int:
"""simple docstring"""
return CvtConfig(
image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels, embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes, patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv, depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate, initializer_range=self.initializer_range, )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__ ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = CvtModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCamelCase__ : Optional[int] = model(__magic_name__ )
UpperCamelCase__ : List[str] = (self.image_size, self.image_size)
UpperCamelCase__ ,UpperCamelCase__ : Tuple = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
UpperCamelCase__ : Optional[int] = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
UpperCamelCase__ : int = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width) )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__ ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ : Tuple = self.num_labels
UpperCamelCase__ : str = CvtForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCamelCase__ : Any = model(__magic_name__, labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : int = self.prepare_config_and_inputs()
UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : Optional[int] = config_and_inputs
UpperCamelCase__ : List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowercase__ ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
a : List[Any] = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
a : Any = (
{"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
if is_torch_available()
else {}
)
a : List[str] = False
a : int = False
a : Tuple = False
a : int = False
a : Tuple = False
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase__ : Dict = CvtModelTester(self )
UpperCamelCase__ : List[Any] = ConfigTester(self, config_class=__magic_name__, has_text_modality=__magic_name__, hidden_size=37 )
def UpperCamelCase__ ( self ) -> List[str]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase__ ( self ) -> List[str]:
"""simple docstring"""
return
@unittest.skip(reason='''Cvt does not output attentions''' )
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
pass
@unittest.skip(reason='''Cvt does not use inputs_embeds''' )
def UpperCamelCase__ ( self ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason='''Cvt does not support input and output embeddings''' )
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
pass
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ ,UpperCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : List[Any] = model_class(__magic_name__ )
UpperCamelCase__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : Optional[int] = [*signature.parameters.keys()]
UpperCamelCase__ : Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1], __magic_name__ )
def UpperCamelCase__ ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
def check_hidden_states_output(__magic_name__, __magic_name__, __magic_name__ ):
UpperCamelCase__ : str = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
UpperCamelCase__ : List[Any] = model(**self._prepare_for_class(__magic_name__, __magic_name__ ) )
UpperCamelCase__ : str = outputs.hidden_states
UpperCamelCase__ : Dict = len(self.model_tester.depth )
self.assertEqual(len(__magic_name__ ), __magic_name__ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ), [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
], )
UpperCamelCase__ ,UpperCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Optional[int] = True
check_hidden_states_output(__magic_name__, __magic_name__, __magic_name__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase__ : int = True
check_hidden_states_output(__magic_name__, __magic_name__, __magic_name__ )
def UpperCamelCase__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__magic_name__ )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@slow
def UpperCamelCase__ ( self ) -> Tuple:
"""simple docstring"""
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ : Union[str, Any] = CvtModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def lowerCAmelCase_ ( ) -> int:
UpperCamelCase__ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase__ ( self ) -> Any:
"""simple docstring"""
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__magic_name__ )
UpperCamelCase__ : Union[str, Any] = self.default_image_processor
UpperCamelCase__ : Any = prepare_img()
UpperCamelCase__ : List[str] = image_processor(images=__magic_name__, return_tensors='''pt''' ).to(__magic_name__ )
# forward pass
with torch.no_grad():
UpperCamelCase__ : Tuple = model(**__magic_name__ )
# verify the logits
UpperCamelCase__ : Tuple = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape, __magic_name__ )
UpperCamelCase__ : List[Any] = torch.tensor([0.9285, 0.9015, -0.3150] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3], __magic_name__, atol=1E-4 ) )
| 369
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] = {
'facebook/deit-base-distilled-patch16-224': (
'https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = """deit"""
def __init__( self , A_=768 , A_=12 , A_=12 , A_=3072 , A_="gelu" , A_=0.0 , A_=0.0 , A_=0.02 , A_=1e-12 , A_=224 , A_=16 , A_=3 , A_=True , A_=16 , **A_ , )-> Union[str, Any]:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
UpperCamelCase = image_size
UpperCamelCase = patch_size
UpperCamelCase = num_channels
UpperCamelCase = qkv_bias
UpperCamelCase = encoder_stride
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = version.parse("""1.11""")
@property
def UpperCAmelCase_ ( self )-> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def UpperCAmelCase_ ( self )-> float:
'''simple docstring'''
return 1e-4
| 3
|
'''simple docstring'''
from __future__ import annotations
import math
def _a ( lowerCamelCase_ ):
if num <= 0:
snake_case : str =F'''{num}: Invalid input, please enter a positive integer.'''
raise ValueError(lowerCamelCase_ )
snake_case : Optional[int] =[True] * (num + 1)
snake_case : List[str] =[]
snake_case : str =2
snake_case : Union[str, Any] =int(math.sqrt(lowerCamelCase_ ) )
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(lowerCamelCase_ )
# Set multiples of start be False
for i in range(start * start , num + 1 , lowerCamelCase_ ):
if sieve[i] is True:
snake_case : List[str] =False
start += 1
for j in range(end + 1 , num + 1 ):
if sieve[j] is True:
prime.append(lowerCamelCase_ )
return prime
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
| 349
| 0
|
"""simple docstring"""
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def lowercase__ ( lowerCAmelCase__ : bool = True , *lowerCAmelCase__ : str , **lowerCAmelCase__ : Dict ) -> Union[str, Any]:
'''simple docstring'''
if not is_tqdm_available():
raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`." )
a__ : Tuple = False
if main_process_only:
a__ : List[str] = PartialState().local_process_index == 0
return _tqdm(*lowerCAmelCase__ , **lowerCAmelCase__ , disable=lowerCAmelCase__ )
| 251
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class __UpperCAmelCase ( _UpperCamelCase ):
def __init__( self : int , *a_ : List[str] , **a_ : Any ) -> None:
'''simple docstring'''
warnings.warn(
"The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use VideoMAEImageProcessor instead." , a_ , )
super().__init__(*a_ , **a_ )
| 251
| 1
|
"""simple docstring"""
from jiwer import compute_measures
import datasets
A_ = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
A_ = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""
A_ = """
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> wer = datasets.load_metric(\"wer\")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
def UpperCAmelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
] , )
def UpperCAmelCase__ ( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=False ):
if concatenate_texts:
return compute_measures(UpperCAmelCase , UpperCAmelCase )["wer"]
else:
lowerCamelCase_ = 0
lowerCamelCase_ = 0
for prediction, reference in zip(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = compute_measures(UpperCAmelCase , UpperCAmelCase )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
| 29
|
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class _lowerCAmelCase ( lowercase_ ):
"""simple docstring"""
_lowercase : torch.FloatTensor
class _lowerCAmelCase ( lowercase_ , lowercase_ ):
"""simple docstring"""
@register_to_config
def __init__( self : Tuple , UpperCamelCase__ : int = 3_2 , UpperCamelCase__ : int = 6_4 , UpperCamelCase__ : int = 2_0 , UpperCamelCase__ : int = 7_6_8 , UpperCamelCase__ : Optional[Any]=7_7 , UpperCamelCase__ : str=4 , UpperCamelCase__ : float = 0.0 , UpperCamelCase__ : str = "silu" , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[str] = "linear" , UpperCamelCase__ : Optional[str] = "prd" , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , ):
'''simple docstring'''
super().__init__()
snake_case__ = num_attention_heads
snake_case__ = attention_head_dim
snake_case__ = num_attention_heads * attention_head_dim
snake_case__ = additional_embeddings
snake_case__ = time_embed_dim or inner_dim
snake_case__ = embedding_proj_dim or embedding_dim
snake_case__ = clip_embed_dim or embedding_dim
snake_case__ = Timesteps(UpperCamelCase__ , UpperCamelCase__ , 0)
snake_case__ = TimestepEmbedding(UpperCamelCase__ , UpperCamelCase__ , out_dim=UpperCamelCase__ , act_fn=UpperCamelCase__)
snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
if embedding_proj_norm_type is None:
snake_case__ = None
elif embedding_proj_norm_type == "layer":
snake_case__ = nn.LayerNorm(UpperCamelCase__)
else:
raise ValueError(F'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''')
snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
if encoder_hid_proj_type is None:
snake_case__ = None
elif encoder_hid_proj_type == "linear":
snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
else:
raise ValueError(F'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''')
snake_case__ = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , UpperCamelCase__))
if added_emb_type == "prd":
snake_case__ = nn.Parameter(torch.zeros(1 , 1 , UpperCamelCase__))
elif added_emb_type is None:
snake_case__ = None
else:
raise ValueError(
F'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''')
snake_case__ = nn.ModuleList(
[
BasicTransformerBlock(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , dropout=UpperCamelCase__ , activation_fn="""gelu""" , attention_bias=UpperCamelCase__ , )
for d in range(UpperCamelCase__)
])
if norm_in_type == "layer":
snake_case__ = nn.LayerNorm(UpperCamelCase__)
elif norm_in_type is None:
snake_case__ = None
else:
raise ValueError(F'''Unsupported norm_in_type: {norm_in_type}.''')
snake_case__ = nn.LayerNorm(UpperCamelCase__)
snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__)
snake_case__ = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0)
causal_attention_mask.triu_(1)
snake_case__ = causal_attention_mask[None, ...]
self.register_buffer("""causal_attention_mask""" , UpperCamelCase__ , persistent=UpperCamelCase__)
snake_case__ = nn.Parameter(torch.zeros(1 , UpperCamelCase__))
snake_case__ = nn.Parameter(torch.zeros(1 , UpperCamelCase__))
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __magic_name__ ( self : Optional[int]):
'''simple docstring'''
snake_case__ = {}
def fn_recursive_add_processors(UpperCamelCase__ : str , UpperCamelCase__ : torch.nn.Module , UpperCamelCase__ : Dict[str, AttentionProcessor]):
if hasattr(UpperCamelCase__ , """set_processor"""):
snake_case__ = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'''{name}.{sub_name}''' , UpperCamelCase__ , UpperCamelCase__)
return processors
for name, module in self.named_children():
fn_recursive_add_processors(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
return processors
def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
'''simple docstring'''
snake_case__ = len(self.attn_processors.keys())
if isinstance(UpperCamelCase__ , UpperCamelCase__) and len(UpperCamelCase__) != count:
raise ValueError(
F'''A dict of processors was passed, but the number of processors {len(UpperCamelCase__)} does not match the'''
F''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''')
def fn_recursive_attn_processor(UpperCamelCase__ : str , UpperCamelCase__ : torch.nn.Module , UpperCamelCase__ : Optional[int]):
if hasattr(UpperCamelCase__ , """set_processor"""):
if not isinstance(UpperCamelCase__ , UpperCamelCase__):
module.set_processor(UpperCamelCase__)
else:
module.set_processor(processor.pop(F'''{name}.processor'''))
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'''{name}.{sub_name}''' , UpperCamelCase__ , UpperCamelCase__)
for name, module in self.named_children():
fn_recursive_attn_processor(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
def __magic_name__ ( self : Dict):
'''simple docstring'''
self.set_attn_processor(AttnProcessor())
def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[torch.Tensor, float, int] , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.BoolTensor] = None , UpperCamelCase__ : bool = True , ):
'''simple docstring'''
snake_case__ = hidden_states.shape[0]
snake_case__ = timestep
if not torch.is_tensor(UpperCamelCase__):
snake_case__ = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device)
elif torch.is_tensor(UpperCamelCase__) and len(timesteps.shape) == 0:
snake_case__ = timesteps[None].to(hidden_states.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
snake_case__ = timesteps * torch.ones(UpperCamelCase__ , dtype=timesteps.dtype , device=timesteps.device)
snake_case__ = self.time_proj(UpperCamelCase__)
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
snake_case__ = timesteps_projected.to(dtype=self.dtype)
snake_case__ = self.time_embedding(UpperCamelCase__)
if self.embedding_proj_norm is not None:
snake_case__ = self.embedding_proj_norm(UpperCamelCase__)
snake_case__ = self.embedding_proj(UpperCamelCase__)
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
snake_case__ = self.encoder_hidden_states_proj(UpperCamelCase__)
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""")
snake_case__ = self.proj_in(UpperCamelCase__)
snake_case__ = self.positional_embedding.to(hidden_states.dtype)
snake_case__ = []
snake_case__ = 0
if encoder_hidden_states is not None:
additional_embeds.append(UpperCamelCase__)
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape) == 2:
snake_case__ = proj_embeddings[:, None, :]
if len(hidden_states.shape) == 2:
snake_case__ = hidden_states[:, None, :]
snake_case__ = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
snake_case__ = self.prd_embedding.to(hidden_states.dtype).expand(UpperCamelCase__ , -1 , -1)
additional_embeds.append(UpperCamelCase__)
snake_case__ = torch.cat(
UpperCamelCase__ , dim=1 , )
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
snake_case__ = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
snake_case__ = F.pad(
UpperCamelCase__ , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
snake_case__ = hidden_states + positional_embeddings
if attention_mask is not None:
snake_case__ = (1 - attention_mask.to(hidden_states.dtype)) * -1_00_00.0
snake_case__ = F.pad(UpperCamelCase__ , (0, self.additional_embeddings) , value=0.0)
snake_case__ = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
snake_case__ = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0)
if self.norm_in is not None:
snake_case__ = self.norm_in(UpperCamelCase__)
for block in self.transformer_blocks:
snake_case__ = block(UpperCamelCase__ , attention_mask=UpperCamelCase__)
snake_case__ = self.norm_out(UpperCamelCase__)
if self.prd_embedding is not None:
snake_case__ = hidden_states[:, -1]
else:
snake_case__ = hidden_states[:, additional_embeddings_len:]
snake_case__ = self.proj_to_clip_embeddings(UpperCamelCase__)
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=UpperCamelCase__)
def __magic_name__ ( self : Any , UpperCamelCase__ : Any):
'''simple docstring'''
snake_case__ = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 654
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class a_ ( UpperCamelCase_ ):
_snake_case = """gpt_neo"""
_snake_case = ["""past_key_values"""]
_snake_case = {"""num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
def __init__(self , __a=5_0_2_5_7 , __a=2_0_4_8 , __a=2_0_4_8 , __a=2_4 , __a=[[["global", "local"], 1_2]] , __a=1_6 , __a=None , __a=2_5_6 , __a="gelu_new" , __a=0.0 , __a=0.0 , __a=0.0 , __a=0.1 , __a=1E-5 , __a=0.02 , __a=True , __a=5_0_2_5_6 , __a=5_0_2_5_6 , **__a , ) -> Dict:
"""simple docstring"""
__snake_case : Tuple = vocab_size
__snake_case : Tuple = max_position_embeddings
__snake_case : List[Any] = hidden_size
__snake_case : int = num_layers
__snake_case : Dict = num_heads
__snake_case : Optional[int] = intermediate_size
__snake_case : List[Any] = window_size
__snake_case : Optional[Any] = activation_function
__snake_case : str = resid_dropout
__snake_case : str = embed_dropout
__snake_case : Union[str, Any] = attention_dropout
__snake_case : Any = classifier_dropout
__snake_case : Tuple = layer_norm_epsilon
__snake_case : Any = initializer_range
__snake_case : Optional[int] = use_cache
__snake_case : List[str] = bos_token_id
__snake_case : int = eos_token_id
__snake_case : Any = attention_types
__snake_case : Tuple = self.expand_attention_types_params(__a)
if len(self.attention_layers) != self.num_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.attention_layers)` == `config.num_layers` '
F"""but is `len(config.attention_layers) = {len(self.attention_layers)}`, """
F"""`config.num_layers = {self.num_layers}`. """
'`config.attention_layers` is prepared using `config.attention_types`. '
'Please verify the value of `config.attention_types` argument.')
super().__init__(bos_token_id=__a , eos_token_id=__a , **__a)
@staticmethod
def SCREAMING_SNAKE_CASE__ (__a) -> Tuple:
"""simple docstring"""
__snake_case : List[str] = []
for item in attention_types:
for _ in range(item[1]):
attentions.extend(item[0])
return attentions
def _SCREAMING_SNAKE_CASE ( A : Any , A : Tuple , A : Dict , A : List[str] ) -> Optional[int]:
"""simple docstring"""
import torch
__snake_case : str = input.size()
__snake_case : str = len(A )
__snake_case : List[Any] = shape[dimension]
__snake_case : int = torch.arange(0 , A , A )
__snake_case : List[Any] = torch.div(sizedim - size , A , rounding_mode='floor' ) + 1
__snake_case : Dict = torch.arange(A ) + low_indices[:min_length][:, None]
__snake_case : int = [slice(A )] * rank
__snake_case : Dict = indices
__snake_case : List[str] = input[s]
__snake_case : Tuple = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(A )
def _SCREAMING_SNAKE_CASE ( A : Tuple , A : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
import torch
__snake_case : Tuple = torch.arange(1 , A )
__snake_case : Tuple = torch.remainder(A , A )
__snake_case : Optional[Any] = remainders == 0
__snake_case : Optional[int] = candidates[divisor_indices]
__snake_case : Tuple = torch.max(A )
return largest_divisor, torch.div(A , A , rounding_mode='floor' )
class a_ ( UpperCamelCase_ ):
@property
def SCREAMING_SNAKE_CASE__ (self) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
__snake_case : str = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
if self.use_past:
self.fill_with_past_key_values_(__a , direction='inputs')
__snake_case : List[str] = {0: 'batch', 1: 'past_sequence + sequence'}
else:
__snake_case : Dict = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def SCREAMING_SNAKE_CASE__ (self) -> int:
"""simple docstring"""
return self._config.num_heads
def SCREAMING_SNAKE_CASE__ (self , __a , __a = -1 , __a = -1 , __a = False , __a = None , ) -> Mapping[str, Any]:
"""simple docstring"""
__snake_case : int = super(__a , self).generate_dummy_inputs(
__a , batch_size=__a , seq_length=__a , is_pair=__a , framework=__a)
# We need to order the input in the way they appears in the forward()
__snake_case : str = OrderedDict({'input_ids': common_inputs['input_ids']})
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
else:
import torch
__snake_case : Optional[Any] = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
__snake_case : Optional[Any] = seqlen + 2
__snake_case : str = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__snake_case : List[Any] = [
(torch.zeros(__a), torch.zeros(__a)) for _ in range(self.num_layers)
]
__snake_case : Any = common_inputs['attention_mask']
if self.use_past:
__snake_case : Any = ordered_inputs['attention_mask'].dtype
__snake_case : Optional[Any] = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(__a , __a , dtype=__a)] , dim=1)
return ordered_inputs
@property
def SCREAMING_SNAKE_CASE__ (self) -> int:
"""simple docstring"""
return 1_3
| 704
|
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__A = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def _SCREAMING_SNAKE_CASE ( A : Tuple ) -> str:
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(A )
def _SCREAMING_SNAKE_CASE ( A : int ) -> Optional[int]:
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_terminal_summary_main
__snake_case : Any = terminalreporter.config.getoption('--make-reports' )
if make_reports:
pytest_terminal_summary_main(A , id=A )
| 61
| 0
|
"""simple docstring"""
lowerCAmelCase__ = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'''
def a__ ( SCREAMING_SNAKE_CASE : bytes ):
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowerCAmelCase : List[str] = f"""a bytes-like object is required, not '{data.__class__.__name__}'"""
raise TypeError(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = "".join(bin(SCREAMING_SNAKE_CASE )[2:].zfill(8 ) for byte in data )
lowerCAmelCase : Optional[Any] = len(SCREAMING_SNAKE_CASE ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowerCAmelCase : str = B"=" * ((6 - len(SCREAMING_SNAKE_CASE ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(SCREAMING_SNAKE_CASE ) % 6)
else:
lowerCAmelCase : str = B""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(SCREAMING_SNAKE_CASE ) , 6 ) ).encode()
+ padding
)
def a__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Any = (
"argument should be a bytes-like object or ASCII string, "
f"""not '{encoded_data.__class__.__name__}'"""
)
raise TypeError(SCREAMING_SNAKE_CASE )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
try:
lowerCAmelCase : List[str] = encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
lowerCAmelCase : Any = encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(SCREAMING_SNAKE_CASE ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowerCAmelCase : Optional[int] = encoded_data[:-padding]
lowerCAmelCase : int = "".join(
bin(B64_CHARSET.index(SCREAMING_SNAKE_CASE ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowerCAmelCase : List[str] = "".join(
bin(B64_CHARSET.index(SCREAMING_SNAKE_CASE ) )[2:].zfill(6 ) for char in encoded_data )
lowerCAmelCase : int = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(SCREAMING_SNAKE_CASE ) , 8 )
]
return bytes(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 645
|
"""simple docstring"""
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=[30, 30] , snake_case__=2 , snake_case__=3 , snake_case__=True , snake_case__=True , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=10 , snake_case__=0.02 , snake_case__=3 , snake_case__=None , snake_case__=8 , snake_case__=10 , ):
"""simple docstring"""
lowerCAmelCase : Any = parent
lowerCAmelCase : Optional[Any] = batch_size
lowerCAmelCase : Optional[Any] = image_size
lowerCAmelCase : Tuple = patch_size
lowerCAmelCase : Optional[int] = num_channels
lowerCAmelCase : List[str] = is_training
lowerCAmelCase : Optional[int] = use_labels
lowerCAmelCase : Optional[Any] = hidden_size
lowerCAmelCase : str = num_hidden_layers
lowerCAmelCase : Any = num_attention_heads
lowerCAmelCase : Tuple = intermediate_size
lowerCAmelCase : Union[str, Any] = hidden_act
lowerCAmelCase : List[str] = hidden_dropout_prob
lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
lowerCAmelCase : int = type_sequence_label_size
lowerCAmelCase : Dict = initializer_range
lowerCAmelCase : List[str] = num_labels
lowerCAmelCase : List[str] = scope
lowerCAmelCase : Dict = n_targets
lowerCAmelCase : Optional[int] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
lowerCAmelCase : int = (image_size[1] // patch_size) * (image_size[0] // patch_size)
lowerCAmelCase : Optional[int] = num_patches + 1 + self.num_detection_tokens
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
lowerCAmelCase : List[Any] = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
lowerCAmelCase : Dict = []
for i in range(self.batch_size ):
lowerCAmelCase : Any = {}
lowerCAmelCase : Optional[int] = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=snake_case__ )
lowerCAmelCase : Optional[Any] = torch.rand(self.n_targets , 4 , device=snake_case__ )
labels.append(snake_case__ )
lowerCAmelCase : Tuple = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self ):
"""simple docstring"""
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Any = YolosModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Dict = model(snake_case__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Any = YolosForObjectDetection(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Optional[int] = model(pixel_values=snake_case__ )
lowerCAmelCase : Tuple = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
lowerCAmelCase : Any = model(pixel_values=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[Any] = config_and_inputs
lowerCAmelCase : Tuple = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
a : Union[str, Any] =(YolosModel, YolosForObjectDetection) if is_torch_available() else ()
a : Optional[Any] =(
{"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
)
a : List[str] =False
a : Dict =False
a : Any =False
a : Tuple =False
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__=False ):
"""simple docstring"""
lowerCAmelCase : Any = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
lowerCAmelCase : Union[str, Any] = []
for i in range(self.model_tester.batch_size ):
lowerCAmelCase : Tuple = {}
lowerCAmelCase : int = torch.ones(
size=(self.model_tester.n_targets,) , device=snake_case__ , dtype=torch.long )
lowerCAmelCase : Tuple = torch.ones(
self.model_tester.n_targets , 4 , device=snake_case__ , dtype=torch.float )
labels.append(snake_case__ )
lowerCAmelCase : Optional[Any] = labels
return inputs_dict
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = YolosModelTester(self )
lowerCAmelCase : str = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=37 )
def lowercase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Any = model_class(snake_case__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Optional[Any] = model_class(snake_case__ )
lowerCAmelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : Union[str, Any] = [*signature.parameters.keys()]
lowerCAmelCase : Optional[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : Any = True
# in YOLOS, the seq_len is different
lowerCAmelCase : Union[str, Any] = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
lowerCAmelCase : Dict = True
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : Optional[Any] = True
lowerCAmelCase : Optional[Any] = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
lowerCAmelCase : Union[str, Any] = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
lowerCAmelCase : Optional[int] = outputs.attentions
self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCAmelCase : List[str] = True
lowerCAmelCase : Dict = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
lowerCAmelCase : List[Any] = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
lowerCAmelCase : Dict = outputs.attentions
self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowerCAmelCase : List[Any] = len(snake_case__ )
# Check attention is always last and order is fine
lowerCAmelCase : str = True
lowerCAmelCase : Any = True
lowerCAmelCase : Dict = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
lowerCAmelCase : Optional[Any] = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
lowerCAmelCase : Optional[int] = 1
self.assertEqual(out_len + added_hidden_states , len(snake_case__ ) )
lowerCAmelCase : List[Any] = outputs.attentions
self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def lowercase__ ( self ):
"""simple docstring"""
def check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Optional[Any] = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
lowerCAmelCase : Any = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
lowerCAmelCase : Union[str, Any] = outputs.hidden_states
lowerCAmelCase : Tuple = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(snake_case__ ) , snake_case__ )
# YOLOS has a different seq_length
lowerCAmelCase : Optional[Any] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowerCAmelCase , lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : str = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase : Optional[int] = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*snake_case__ )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : str = YolosModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase__ ( self ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained("hustvl/yolos-small" ) if is_vision_available() else None
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = YolosForObjectDetection.from_pretrained("hustvl/yolos-small" ).to(snake_case__ )
lowerCAmelCase : Tuple = self.default_image_processor
lowerCAmelCase : List[Any] = prepare_img()
lowerCAmelCase : Optional[int] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
# forward pass
with torch.no_grad():
lowerCAmelCase : Optional[Any] = model(inputs.pixel_values )
# verify outputs
lowerCAmelCase : Optional[int] = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , snake_case__ )
lowerCAmelCase : Optional[Any] = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=snake_case__ , )
lowerCAmelCase : Tuple = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=snake_case__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , snake_case__ , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , snake_case__ , atol=1e-4 ) )
# verify postprocessing
lowerCAmelCase : List[Any] = image_processor.post_process_object_detection(
snake_case__ , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
lowerCAmelCase : Optional[int] = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861] ).to(snake_case__ )
lowerCAmelCase : int = [75, 75, 17, 63, 17]
lowerCAmelCase : str = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495] ).to(snake_case__ )
self.assertEqual(len(results["scores"] ) , 5 )
self.assertTrue(torch.allclose(results["scores"] , snake_case__ , atol=1e-4 ) )
self.assertSequenceEqual(results["labels"].tolist() , snake_case__ )
self.assertTrue(torch.allclose(results["boxes"][0, :] , snake_case__ ) )
| 645
| 1
|
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class a__ ( datasets.BuilderConfig ):
lowercase_ = None
class a__ ( datasets.ArrowBasedBuilder ):
lowercase_ = PandasConfig
def a_ ( self : Union[str, Any]):
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features)
def a_ ( self : int , UpperCamelCase_ : str):
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}")
__UpperCAmelCase : int = dl_manager.download_and_extract(self.config.data_files)
if isinstance(UpperCamelCase_ , (str, list, tuple)):
__UpperCAmelCase : str = data_files
if isinstance(UpperCamelCase_ , UpperCamelCase_):
__UpperCAmelCase : Dict = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__UpperCAmelCase : int = [dl_manager.iter_files(UpperCamelCase_) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files})]
__UpperCAmelCase : Tuple = []
for split_name, files in data_files.items():
if isinstance(UpperCamelCase_ , UpperCamelCase_):
__UpperCAmelCase : List[str] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__UpperCAmelCase : Union[str, Any] = [dl_manager.iter_files(UpperCamelCase_) for file in files]
splits.append(datasets.SplitGenerator(name=UpperCamelCase_ , gen_kwargs={"files": files}))
return splits
def a_ ( self : Any , UpperCamelCase_ : pa.Table):
"""simple docstring"""
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
__UpperCAmelCase : Dict = table_cast(UpperCamelCase_ , self.config.features.arrow_schema)
return pa_table
def a_ ( self : List[str] , UpperCamelCase_ : Any):
"""simple docstring"""
for i, file in enumerate(itertools.chain.from_iterable(UpperCamelCase_)):
with open(UpperCamelCase_ , "rb") as f:
__UpperCAmelCase : Optional[int] = pa.Table.from_pandas(pd.read_pickle(UpperCamelCase_))
yield i, self._cast_table(UpperCamelCase_)
| 710
|
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
A = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
A = """ def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
"""
class a__ ( unittest.TestCase ):
def a_ ( self : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : List[Any] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , "models/bert/"))
__UpperCAmelCase : Tuple = self.transformer_dir
shutil.copy(
os.path.join(UpperCamelCase_ , "src/transformers/models/bert/modeling_bert.py") , os.path.join(self.transformer_dir , "models/bert/modeling_bert.py") , )
def a_ ( self : int):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = "src/transformers"
shutil.rmtree(self.transformer_dir)
def a_ ( self : str , UpperCamelCase_ : str , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[str]=None):
"""simple docstring"""
__UpperCAmelCase : str = comment + F"\nclass {class_name}(nn.Module):\n" + class_code
if overwrite_result is not None:
__UpperCAmelCase : str = comment + F"\nclass {class_name}(nn.Module):\n" + overwrite_result
__UpperCAmelCase : Optional[int] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119)
__UpperCAmelCase : Dict = black.format_str(UpperCamelCase_ , mode=UpperCamelCase_)
__UpperCAmelCase : Dict = os.path.join(self.transformer_dir , "new_code.py")
with open(UpperCamelCase_ , "w" , newline="\n") as f:
f.write(UpperCamelCase_)
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(UpperCamelCase_)) == 0)
else:
check_copies.is_copy_consistent(f.name , overwrite=UpperCamelCase_)
with open(UpperCamelCase_ , "r") as f:
self.assertTrue(f.read() , UpperCamelCase_)
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase : Any = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
self.assertEqual(UpperCamelCase_ , UpperCamelCase_)
def a_ ( self : Optional[int]):
"""simple docstring"""
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , UpperCamelCase_ , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , re.sub("Bert" , "TestModel" , UpperCamelCase_) , )
# Copy consistency with a really long name
__UpperCAmelCase : Optional[Any] = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
F"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}" , F"{long_class_name}LMPredictionHead" , re.sub("Bert" , UpperCamelCase_ , UpperCamelCase_) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , UpperCamelCase_ , overwrite_result=re.sub("Bert" , "TestModel" , UpperCamelCase_) , )
def a_ ( self : Dict):
"""simple docstring"""
__UpperCAmelCase : List[str] = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
__UpperCAmelCase : str = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
" Luong, Quoc V. Le, Christopher D. Manning."
)
__UpperCAmelCase : List[str] = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
__UpperCAmelCase : str = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
" Christopher D. Manning 发布。\n"
)
__UpperCAmelCase , __UpperCAmelCase : Any = check_copies.convert_to_localized_md(
UpperCamelCase_ , UpperCamelCase_ , localized_readme["format_model_list"])
self.assertFalse(UpperCamelCase_)
self.assertEqual(UpperCamelCase_ , UpperCamelCase_)
__UpperCAmelCase , __UpperCAmelCase : int = check_copies.convert_to_localized_md(
UpperCamelCase_ , UpperCamelCase_ , localized_readme["format_model_list"])
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(UpperCamelCase_)
__UpperCAmelCase : str = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
)
__UpperCAmelCase : Optional[int] = (
"1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
__UpperCAmelCase : int = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
__UpperCAmelCase , __UpperCAmelCase : List[str] = check_copies.convert_to_localized_md(
UpperCamelCase_ , UpperCamelCase_ , localized_readme["format_model_list"])
# Check if the model link is synchronized.
self.assertEqual(UpperCamelCase_ , UpperCamelCase_)
| 487
| 0
|
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
UpperCamelCase_ = TypeVar('T')
class _SCREAMING_SNAKE_CASE ( Generic[T] ):
def __init__(self , UpperCAmelCase = True):
'''simple docstring'''
__UpperCAmelCase ={} # dictionary of lists
__UpperCAmelCase =directed
def A__ (self , UpperCAmelCase , UpperCAmelCase):
'''simple docstring'''
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(UpperCAmelCase)
self.adj_list[destination_vertex].append(UpperCAmelCase)
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(UpperCAmelCase)
__UpperCAmelCase =[source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(UpperCAmelCase)
__UpperCAmelCase =[destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
__UpperCAmelCase =[destination_vertex]
__UpperCAmelCase =[source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(UpperCAmelCase)
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(UpperCAmelCase)
__UpperCAmelCase =[]
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
__UpperCAmelCase =[destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
__UpperCAmelCase =[destination_vertex]
__UpperCAmelCase =[]
return self
def __repr__(self):
'''simple docstring'''
return pformat(self.adj_list)
| 132
|
UpperCamelCase_ = '0.21.0'
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 132
| 1
|
"""simple docstring"""
from __future__ import annotations
import math
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(SCREAMING_SNAKE_CASE ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
_UpperCamelCase = [num for num in range(3, 10_0001, 2) if not is_prime(num)]
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
raise ValueError('''n must be an integer''' )
if n <= 0:
raise ValueError('''n must be >= 0''' )
__lowerCamelCase : Optional[int] =[]
for num in range(len(SCREAMING_SNAKE_CASE ) ):
__lowerCamelCase : int =0
while 2 * i * i <= odd_composites[num]:
__lowerCamelCase : Any =odd_composites[num] - 2 * i * i
if is_prime(SCREAMING_SNAKE_CASE ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(SCREAMING_SNAKE_CASE ) == n:
return list_nums
return []
def lowerCAmelCase_ ( ):
'''simple docstring'''
return compute_nums(1 )[0]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 363
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
_UpperCamelCase = logging.get_logger(__name__)
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : Union[tf.Tensor, np.ndarray] ):
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE , np.ndarray ):
return list(tensor.shape )
__lowerCamelCase : Tuple =tf.shape(SCREAMING_SNAKE_CASE )
if tensor.shape == tf.TensorShape(SCREAMING_SNAKE_CASE ):
return dynamic
__lowerCamelCase : Union[str, Any] =tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(SCREAMING_SNAKE_CASE )]
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : tf.Tensor , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : Optional[str] = None ):
'''simple docstring'''
return tf.nn.softmax(logits=logits + 1E-9 , axis=SCREAMING_SNAKE_CASE , name=SCREAMING_SNAKE_CASE )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Tuple=1E-5 , SCREAMING_SNAKE_CASE : Union[str, Any]=-1 ):
'''simple docstring'''
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' )
# Get mean and variance on the axis to be normalized
__lowerCamelCase , __lowerCamelCase : Dict =tf.nn.moments(SCREAMING_SNAKE_CASE , axes=[axis] , keepdims=SCREAMING_SNAKE_CASE )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
__lowerCamelCase : Dict =[1] * inputs.shape.rank
__lowerCamelCase : Dict =shape_list(SCREAMING_SNAKE_CASE )[axis]
__lowerCamelCase : List[str] =tf.reshape(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowerCamelCase : List[str] =tf.reshape(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Compute layer normalization using the batch_normalization
# function.
__lowerCamelCase : int =tf.nn.batch_normalization(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , offset=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , variance_epsilon=SCREAMING_SNAKE_CASE , )
return outputs
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Dict=0 , SCREAMING_SNAKE_CASE : int=-1 ):
'''simple docstring'''
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
__lowerCamelCase : List[str] =tf.shape(SCREAMING_SNAKE_CASE )
__lowerCamelCase : Optional[Any] =tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
__lowerCamelCase : int =tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : tf.Tensor ):
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE , tf.Tensor ):
__lowerCamelCase : Union[str, Any] =tf.convert_to_tensor(SCREAMING_SNAKE_CASE ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
__lowerCamelCase : str =encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
__lowerCamelCase : Optional[int] =encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
__lowerCamelCase : int =(
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : tf.Tensor , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : str = "input_ids" ):
'''simple docstring'''
tf.debugging.assert_less(
SCREAMING_SNAKE_CASE , tf.cast(SCREAMING_SNAKE_CASE , dtype=tensor.dtype ) , message=(
F'The maximum value of {tensor_name} ({tf.math.reduce_max(SCREAMING_SNAKE_CASE )}) must be smaller than the embedding '
F'layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'
) , )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
__lowerCamelCase : Optional[int] =64512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
__lowerCamelCase : Any =[x for x in data if len(SCREAMING_SNAKE_CASE ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
'''The following attributes cannot be saved to HDF5 file because '''
F'they are larger than {HDF5_OBJECT_HEADER_LIMIT} '
F'bytes: {bad_attributes}' )
__lowerCamelCase : Tuple =np.asarray(SCREAMING_SNAKE_CASE )
__lowerCamelCase : Any =1
__lowerCamelCase : Optional[Any] =np.array_split(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
__lowerCamelCase : Dict =np.array_split(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(SCREAMING_SNAKE_CASE ):
__lowerCamelCase : List[Any] =chunk_data
else:
__lowerCamelCase : Optional[int] =data
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
if name in group.attrs:
__lowerCamelCase : Optional[int] =[n.decode('''utf8''' ) if hasattr(SCREAMING_SNAKE_CASE , '''decode''' ) else n for n in group.attrs[name]]
else:
__lowerCamelCase : Tuple =[]
__lowerCamelCase : List[str] =0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode('''utf8''' ) if hasattr(SCREAMING_SNAKE_CASE , '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] )
chunk_id += 1
return data
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
def _expand_single_ad_tensor(SCREAMING_SNAKE_CASE : List[str] ):
if isinstance(SCREAMING_SNAKE_CASE , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(SCREAMING_SNAKE_CASE , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , SCREAMING_SNAKE_CASE )
| 363
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
A = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( __snake_case ):
"""simple docstring"""
__A = ["pixel_values"]
def __init__( self , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = PILImageResampling.BILINEAR , __UpperCamelCase = True , __UpperCamelCase = 1 / 2_55 , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ):
"""simple docstring"""
super().__init__(**snake_case__ )
snake_case_ = size if size is not None else {"shortest_edge": 3_84}
snake_case_ = get_size_dict(snake_case__ , default_to_square=snake_case__ )
snake_case_ = do_resize
snake_case_ = size
# Default value set here for backwards compatibility where the value in config is None
snake_case_ = crop_pct if crop_pct is not None else 2_24 / 2_56
snake_case_ = resample
snake_case_ = do_rescale
snake_case_ = rescale_factor
snake_case_ = do_normalize
snake_case_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
snake_case_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = PILImageResampling.BICUBIC , __UpperCamelCase = None , **__UpperCamelCase , ):
"""simple docstring"""
snake_case_ = get_size_dict(snake_case__ , default_to_square=snake_case__ )
if "shortest_edge" not in size:
raise ValueError(f"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""" )
snake_case_ = size["shortest_edge"]
if shortest_edge < 3_84:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
snake_case_ = int(shortest_edge / crop_pct )
snake_case_ = get_resize_output_image_size(snake_case__ , size=snake_case__ , default_to_square=snake_case__ )
snake_case_ = resize(image=snake_case__ , size=snake_case__ , resample=snake_case__ , data_format=snake_case__ , **snake_case__ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=snake_case__ , size=(shortest_edge, shortest_edge) , data_format=snake_case__ , **snake_case__ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
snake_case__ , size=(shortest_edge, shortest_edge) , resample=snake_case__ , data_format=snake_case__ , **snake_case__ )
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , ):
"""simple docstring"""
return rescale(snake_case__ , scale=snake_case__ , data_format=snake_case__ , **snake_case__ )
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , ):
"""simple docstring"""
return normalize(snake_case__ , mean=snake_case__ , std=snake_case__ , data_format=snake_case__ , **snake_case__ )
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , **__UpperCamelCase , ):
"""simple docstring"""
snake_case_ = do_resize if do_resize is not None else self.do_resize
snake_case_ = crop_pct if crop_pct is not None else self.crop_pct
snake_case_ = resample if resample is not None else self.resample
snake_case_ = do_rescale if do_rescale is not None else self.do_rescale
snake_case_ = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case_ = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ = image_mean if image_mean is not None else self.image_mean
snake_case_ = image_std if image_std is not None else self.image_std
snake_case_ = size if size is not None else self.size
snake_case_ = get_size_dict(snake_case__ , default_to_square=snake_case__ )
snake_case_ = make_list_of_images(snake_case__ )
if not valid_images(snake_case__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_resize and size["shortest_edge"] < 3_84 and crop_pct is None:
raise ValueError('crop_pct must be specified if size < 384.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
snake_case_ = [to_numpy_array(snake_case__ ) for image in images]
if do_resize:
snake_case_ = [self.resize(image=snake_case__ , size=snake_case__ , crop_pct=snake_case__ , resample=snake_case__ ) for image in images]
if do_rescale:
snake_case_ = [self.rescale(image=snake_case__ , scale=snake_case__ ) for image in images]
if do_normalize:
snake_case_ = [self.normalize(image=snake_case__ , mean=snake_case__ , std=snake_case__ ) for image in images]
snake_case_ = [to_channel_dimension_format(snake_case__ , snake_case__ ) for image in images]
snake_case_ = {"pixel_values": images}
return BatchFeature(data=snake_case__ , tensor_type=snake_case__ )
| 187
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''openai/imagegpt-small''': '''''',
'''openai/imagegpt-medium''': '''''',
'''openai/imagegpt-large''': '''''',
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : int ="imagegpt"
a : Union[str, Any] =["past_key_values"]
a : Optional[Any] ={
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , snake_case__=512 + 1 , snake_case__=32 * 32 , snake_case__=512 , snake_case__=24 , snake_case__=8 , snake_case__=None , snake_case__="quick_gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=1e-5 , snake_case__=0.02 , snake_case__=True , snake_case__=True , snake_case__=False , snake_case__=False , snake_case__=False , **snake_case__ , ):
"""simple docstring"""
lowerCAmelCase : Tuple = vocab_size
lowerCAmelCase : List[Any] = n_positions
lowerCAmelCase : Union[str, Any] = n_embd
lowerCAmelCase : str = n_layer
lowerCAmelCase : Tuple = n_head
lowerCAmelCase : Optional[Any] = n_inner
lowerCAmelCase : Dict = activation_function
lowerCAmelCase : str = resid_pdrop
lowerCAmelCase : Optional[int] = embd_pdrop
lowerCAmelCase : Optional[int] = attn_pdrop
lowerCAmelCase : Union[str, Any] = layer_norm_epsilon
lowerCAmelCase : Any = initializer_range
lowerCAmelCase : Union[str, Any] = scale_attn_weights
lowerCAmelCase : int = use_cache
lowerCAmelCase : List[Any] = scale_attn_by_inverse_layer_idx
lowerCAmelCase : Optional[int] = reorder_and_upcast_attn
lowerCAmelCase : int = tie_word_embeddings
super().__init__(tie_word_embeddings=snake_case__ , **snake_case__ )
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
@property
def lowercase__ ( self ):
"""simple docstring"""
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
] )
def lowercase__ ( self , snake_case__ , snake_case__ = 1 , snake_case__ = -1 , snake_case__ = False , snake_case__ = None , snake_case__ = 3 , snake_case__ = 32 , snake_case__ = 32 , ):
"""simple docstring"""
lowerCAmelCase : Tuple = self._generate_dummy_images(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
lowerCAmelCase : Union[str, Any] = dict(preprocessor(images=snake_case__ , return_tensors=snake_case__ ) )
return inputs
| 645
| 0
|
# Imports
import numpy as np
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self: Tuple , _UpperCAmelCase: str=None , _UpperCAmelCase: Optional[Any]=None , _UpperCAmelCase: int=None , _UpperCAmelCase: Any=None , _UpperCAmelCase: Tuple=None ):
self.set_matricies(red=_UpperCAmelCase , green=_UpperCAmelCase , blue=_UpperCAmelCase , red_edge=_UpperCAmelCase , nir=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] , _UpperCAmelCase: Dict=None , _UpperCAmelCase: Tuple=None , _UpperCAmelCase: Union[str, Any]=None , _UpperCAmelCase: Union[str, Any]=None , _UpperCAmelCase: Union[str, Any]=None ):
if red is not None:
_lowerCAmelCase :Dict = red
if green is not None:
_lowerCAmelCase :int = green
if blue is not None:
_lowerCAmelCase :Optional[int] = blue
if red_edge is not None:
_lowerCAmelCase :int = red_edge
if nir is not None:
_lowerCAmelCase :List[str] = nir
return True
def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: Union[str, Any]="" , _UpperCAmelCase: str=None , _UpperCAmelCase: List[Any]=None , _UpperCAmelCase: Any=None , _UpperCAmelCase: List[str]=None , _UpperCAmelCase: List[str]=None ):
self.set_matricies(red=_UpperCAmelCase , green=_UpperCAmelCase , blue=_UpperCAmelCase , red_edge=_UpperCAmelCase , nir=_UpperCAmelCase )
_lowerCAmelCase :Dict = {
'ARVI2': self.arvaa,
'CCCI': self.ccci,
'CVI': self.cvi,
'GLI': self.gli,
'NDVI': self.ndvi,
'BNDVI': self.bndvi,
'redEdgeNDVI': self.red_edge_ndvi,
'GNDVI': self.gndvi,
'GBNDVI': self.gbndvi,
'GRNDVI': self.grndvi,
'RBNDVI': self.rbndvi,
'PNDVI': self.pndvi,
'ATSAVI': self.atsavi,
'BWDRVI': self.bwdrvi,
'CIgreen': self.ci_green,
'CIrededge': self.ci_rededge,
'CI': self.ci,
'CTVI': self.ctvi,
'GDVI': self.gdvi,
'EVI': self.evi,
'GEMI': self.gemi,
'GOSAVI': self.gosavi,
'GSAVI': self.gsavi,
'Hue': self.hue,
'IVI': self.ivi,
'IPVI': self.ipvi,
'I': self.i,
'RVI': self.rvi,
'MRVI': self.mrvi,
'MSAVI': self.m_savi,
'NormG': self.norm_g,
'NormNIR': self.norm_nir,
'NormR': self.norm_r,
'NGRDI': self.ngrdi,
'RI': self.ri,
'S': self.s,
'IF': self._if,
'DVI': self.dvi,
'TVI': self.tvi,
'NDRE': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('Index not in the list!' )
return False
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
return -0.1_8 + (1.1_7 * ((self.nir - self.red) / (self.nir + self.red)))
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
return self.nir * (self.red / (self.green**2))
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
return (self.nir - self.red) / (self.nir + self.red)
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
return (self.nir - self.blue) / (self.nir + self.blue)
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
return (self.redEdge - self.red) / (self.redEdge + self.red)
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
return (self.nir - self.green) / (self.nir + self.green)
def SCREAMING_SNAKE_CASE__ ( self: int ):
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: Dict=0.0_8 , _UpperCAmelCase: List[str]=1.2_2 , _UpperCAmelCase: Any=0.0_3 ):
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def SCREAMING_SNAKE_CASE__ ( self: Any ):
return (self.nir / self.green) - 1
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
return (self.nir / self.redEdge) - 1
def SCREAMING_SNAKE_CASE__ ( self: str ):
return (self.red - self.blue) / self.red
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase :List[Any] = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
return self.nir - self.green
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :Dict = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.2_5 * n) - (self.red - 0.1_2_5) / (1 - self.red)
def SCREAMING_SNAKE_CASE__ ( self: List[Any] , _UpperCAmelCase: Dict=0.1_6 ):
return (self.nir - self.green) / (self.nir + self.green + y)
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] , _UpperCAmelCase: List[str]=0.5 ):
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] , _UpperCAmelCase: Any=None , _UpperCAmelCase: Union[str, Any]=None ):
return (self.nir - b) / (a * self.red)
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
return (self.red + self.green + self.blue) / 30.5
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
return self.nir / self.red
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
return (self.rvi() - 1) / (self.rvi() + 1)
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
return self.green / (self.nir + self.red + self.green)
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
return self.nir / (self.nir + self.red + self.green)
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
return self.red / (self.nir + self.red + self.green)
def SCREAMING_SNAKE_CASE__ ( self: Any ):
return (self.green - self.red) / (self.green + self.red)
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
return (self.red - self.green) / (self.red + self.green)
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
_lowerCAmelCase :Optional[Any] = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
_lowerCAmelCase :List[str] = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def SCREAMING_SNAKE_CASE__ ( self: str ):
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def SCREAMING_SNAKE_CASE__ ( self: str ):
return self.nir / self.red
def SCREAMING_SNAKE_CASE__ ( self: Any ):
return (self.ndvi() + 0.5) ** (1 / 2)
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 720
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :List[Any] = 'ZinengTang/tvlt-base'
_lowerCAmelCase :Tuple = tempfile.mkdtemp()
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , **_UpperCAmelCase: List[str] ):
return TvltImageProcessor.from_pretrained(self.checkpoint , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Dict , **_UpperCAmelCase: int ):
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
_lowerCAmelCase :Optional[Any] = self.get_image_processor()
_lowerCAmelCase :List[str] = self.get_feature_extractor()
_lowerCAmelCase :List[Any] = TvltProcessor(image_processor=_UpperCAmelCase , feature_extractor=_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase :Dict = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , _UpperCAmelCase )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Any ):
_lowerCAmelCase :List[str] = self.get_image_processor()
_lowerCAmelCase :Tuple = self.get_feature_extractor()
_lowerCAmelCase :List[str] = TvltProcessor(image_processor=_UpperCAmelCase , feature_extractor=_UpperCAmelCase )
_lowerCAmelCase :str = np.ones([1_2000] )
_lowerCAmelCase :Union[str, Any] = feature_extractor(_UpperCAmelCase , return_tensors='np' )
_lowerCAmelCase :Optional[int] = processor(audio=_UpperCAmelCase , return_tensors='np' )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Union[str, Any] = self.get_image_processor()
_lowerCAmelCase :str = self.get_feature_extractor()
_lowerCAmelCase :Any = TvltProcessor(image_processor=_UpperCAmelCase , feature_extractor=_UpperCAmelCase )
_lowerCAmelCase :List[Any] = np.ones([3, 224, 224] )
_lowerCAmelCase :Optional[int] = image_processor(_UpperCAmelCase , return_tensors='np' )
_lowerCAmelCase :List[Any] = processor(images=_UpperCAmelCase , return_tensors='np' )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Optional[Any] = self.get_image_processor()
_lowerCAmelCase :Optional[Any] = self.get_feature_extractor()
_lowerCAmelCase :Optional[int] = TvltProcessor(image_processor=_UpperCAmelCase , feature_extractor=_UpperCAmelCase )
_lowerCAmelCase :List[str] = np.ones([1_2000] )
_lowerCAmelCase :List[str] = np.ones([3, 224, 224] )
_lowerCAmelCase :Union[str, Any] = processor(audio=_UpperCAmelCase , images=_UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['audio_values', 'audio_mask', 'pixel_values', 'pixel_mask'] )
# test if it raises when no input is passed
with pytest.raises(_UpperCAmelCase ):
processor()
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
_lowerCAmelCase :Tuple = self.get_image_processor()
_lowerCAmelCase :Union[str, Any] = self.get_feature_extractor()
_lowerCAmelCase :Union[str, Any] = TvltProcessor(image_processor=_UpperCAmelCase , feature_extractor=_UpperCAmelCase )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='`processor` and `image_processor`+`feature_extractor` model input names do not match' , )
| 382
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : Optional[int] = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class UpperCamelCase__ ( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = StableDiffusionLatentUpscalePipeline
__magic_name__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"height",
"width",
"cross_attention_kwargs",
"negative_prompt_embeds",
"prompt_embeds",
}
__magic_name__ = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
__magic_name__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__magic_name__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__magic_name__ = frozenset([] )
__magic_name__ = True
@property
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = 1
_lowerCAmelCase : Optional[Any] = 4
_lowerCAmelCase : Dict = (16, 16)
_lowerCAmelCase : str = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(snake_case__ )
return image
def a ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = UNetaDConditionModel(
act_fn='gelu' , attention_head_dim=8 , norm_num_groups=snake_case__ , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=160 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
'KDownBlock2D',
'KCrossAttnDownBlock2D',
'KCrossAttnDownBlock2D',
'KCrossAttnDownBlock2D',
) , in_channels=8 , mid_block_type=snake_case__ , only_cross_attention=snake_case__ , out_channels=5 , resnet_time_scale_shift='scale_shift' , time_embedding_type='fourier' , timestep_post_act='gelu' , up_block_types=('KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KUpBlock2D') , )
_lowerCAmelCase : int = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
'DownEncoderBlock2D',
'DownEncoderBlock2D',
'DownEncoderBlock2D',
'DownEncoderBlock2D',
] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
_lowerCAmelCase : List[Any] = EulerDiscreteScheduler(prediction_type='sample' )
_lowerCAmelCase : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='quick_gelu' , projection_dim=512 , )
_lowerCAmelCase : Dict = CLIPTextModel(snake_case__ )
_lowerCAmelCase : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_lowerCAmelCase : Optional[Any] = {
"unet": model.eval(),
"vae": vae.eval(),
"scheduler": scheduler,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def a ( self , snake_case__ , snake_case__=0 ):
'''simple docstring'''
if str(snake_case__ ).startswith('mps' ):
_lowerCAmelCase : Optional[int] = torch.manual_seed(snake_case__ )
else:
_lowerCAmelCase : Optional[Any] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
_lowerCAmelCase : Union[str, Any] = {
"prompt": "A painting of a squirrel eating a burger",
"image": self.dummy_image.cpu(),
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = "cpu"
_lowerCAmelCase : Union[str, Any] = self.get_dummy_components()
_lowerCAmelCase : int = self.pipeline_class(**snake_case__ )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
_lowerCAmelCase : Dict = self.get_dummy_inputs(snake_case__ )
_lowerCAmelCase : Optional[int] = pipe(**snake_case__ ).images
_lowerCAmelCase : Tuple = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 256, 256, 3) )
_lowerCAmelCase : Optional[Any] = np.array(
[0.4722_2412, 0.4192_1633, 0.4471_7434, 0.4687_4192, 0.4258_8258, 0.4615_0726, 0.467_7534, 0.4558_3832, 0.4857_9055] )
_lowerCAmelCase : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(snake_case__ , 1E-3 )
def a ( self ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )
def a ( self ):
'''simple docstring'''
super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )
def a ( self ):
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def a ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=7E-3 )
def a ( self ):
'''simple docstring'''
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )
def a ( self ):
'''simple docstring'''
super().test_save_load_local(expected_max_difference=3E-3 )
def a ( self ):
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = [
"DDIMScheduler",
"DDPMScheduler",
"PNDMScheduler",
"HeunDiscreteScheduler",
"EulerAncestralDiscreteScheduler",
"KDPM2DiscreteScheduler",
"KDPM2AncestralDiscreteScheduler",
"DPMSolverSDEScheduler",
]
_lowerCAmelCase : Tuple = self.get_dummy_components()
_lowerCAmelCase : Any = self.pipeline_class(**snake_case__ )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=snake_case__ )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
_lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(snake_case__ )
_lowerCAmelCase : List[str] = 2
_lowerCAmelCase : str = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# no sigma schedulers are not supported
# no schedulers
continue
_lowerCAmelCase : Union[str, Any] = getattr(snake_case__ , scheduler_enum.name )
_lowerCAmelCase : Optional[Any] = scheduler_cls.from_config(pipe.scheduler.config )
_lowerCAmelCase : List[str] = pipe(**snake_case__ )[0]
outputs.append(snake_case__ )
assert check_same_shape(snake_case__ )
@require_torch_gpu
@slow
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = torch.manual_seed(33 )
_lowerCAmelCase : List[str] = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' , torch_dtype=torch.floataa )
pipe.to('cuda' )
_lowerCAmelCase : int = StableDiffusionLatentUpscalePipeline.from_pretrained(
'stabilityai/sd-x2-latent-upscaler' , torch_dtype=torch.floataa )
upscaler.to('cuda' )
_lowerCAmelCase : Optional[Any] = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
_lowerCAmelCase : str = pipe(snake_case__ , generator=snake_case__ , output_type='latent' ).images
_lowerCAmelCase : Optional[Any] = upscaler(
prompt=snake_case__ , image=snake_case__ , num_inference_steps=20 , guidance_scale=0 , generator=snake_case__ , output_type='np' , ).images[0]
_lowerCAmelCase : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy' )
assert np.abs((expected_image - image).mean() ) < 5E-2
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = torch.manual_seed(33 )
_lowerCAmelCase : str = StableDiffusionLatentUpscalePipeline.from_pretrained(
'stabilityai/sd-x2-latent-upscaler' , torch_dtype=torch.floataa )
upscaler.to('cuda' )
_lowerCAmelCase : Tuple = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"
_lowerCAmelCase : Tuple = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png' )
_lowerCAmelCase : str = upscaler(
prompt=snake_case__ , image=snake_case__ , num_inference_steps=20 , guidance_scale=0 , generator=snake_case__ , output_type='np' , ).images[0]
_lowerCAmelCase : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy' )
assert np.abs((expected_image - image).max() ) < 5E-2
| 444
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase__ ( snake_case__, unittest.TestCase ):
_UpperCAmelCase :Dict = DDIMPipeline
_UpperCAmelCase :List[str] = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
_UpperCAmelCase :List[Any] = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"latents",
"callback",
"callback_steps",
}
_UpperCAmelCase :Optional[Any] = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
_UpperCAmelCase :Tuple = False
def UpperCAmelCase__ ( self : Optional[Any] ):
torch.manual_seed(0 )
lowerCamelCase_ : Tuple =UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
lowerCamelCase_ : Union[str, Any] =DDIMScheduler()
lowerCamelCase_ : int ={"unet": unet, "scheduler": scheduler}
return components
def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : List[Any] , snake_case__ : Any=0 ):
if str(snake_case__ ).startswith("mps" ):
lowerCamelCase_ : Any =torch.manual_seed(snake_case__ )
else:
lowerCamelCase_ : List[Any] =torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
lowerCamelCase_ : List[Any] ={
"batch_size": 1,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def UpperCAmelCase__ ( self : Dict ):
lowerCamelCase_ : List[Any] ="cpu"
lowerCamelCase_ : List[Any] =self.get_dummy_components()
lowerCamelCase_ : Union[str, Any] =self.pipeline_class(**snake_case__ )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCamelCase_ : Any =self.get_dummy_inputs(snake_case__ )
lowerCamelCase_ : List[str] =pipe(**snake_case__ ).images
lowerCamelCase_ : Tuple =image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
lowerCamelCase_ : Optional[Any] =np.array(
[1.000E00, 5.717E-01, 4.717E-01, 1.000E00, 0.000E00, 1.000E00, 3.000E-04, 0.000E00, 9.000E-04] )
lowerCamelCase_ : Dict =np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(snake_case__ , 1E-3 )
def UpperCAmelCase__ ( self : List[Any] ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def UpperCAmelCase__ ( self : Dict ):
super().test_save_load_local(expected_max_difference=3E-3 )
def UpperCAmelCase__ ( self : str ):
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def UpperCAmelCase__ ( self : str ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Dict ):
lowerCamelCase_ : Any ="google/ddpm-cifar10-32"
lowerCamelCase_ : List[Any] =UNetaDModel.from_pretrained(snake_case__ )
lowerCamelCase_ : str =DDIMScheduler()
lowerCamelCase_ : Optional[int] =DDIMPipeline(unet=snake_case__ , scheduler=snake_case__ )
ddim.to(snake_case__ )
ddim.set_progress_bar_config(disable=snake_case__ )
lowerCamelCase_ : Optional[int] =torch.manual_seed(0 )
lowerCamelCase_ : str =ddim(generator=snake_case__ , eta=0.0 , output_type="numpy" ).images
lowerCamelCase_ : int =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase_ : int =np.array([0.1_723, 0.1_617, 0.1_600, 0.1_626, 0.1_497, 0.1_513, 0.1_505, 0.1_442, 0.1_453] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self : List[str] ):
lowerCamelCase_ : str ="google/ddpm-ema-bedroom-256"
lowerCamelCase_ : Tuple =UNetaDModel.from_pretrained(snake_case__ )
lowerCamelCase_ : Dict =DDIMScheduler.from_pretrained(snake_case__ )
lowerCamelCase_ : str =DDIMPipeline(unet=snake_case__ , scheduler=snake_case__ )
ddpm.to(snake_case__ )
ddpm.set_progress_bar_config(disable=snake_case__ )
lowerCamelCase_ : int =torch.manual_seed(0 )
lowerCamelCase_ : Union[str, Any] =ddpm(generator=snake_case__ , output_type="numpy" ).images
lowerCamelCase_ : Optional[Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
lowerCamelCase_ : Tuple =np.array([0.0_060, 0.0_201, 0.0_344, 0.0_024, 0.0_018, 0.0_002, 0.0_022, 0.0_000, 0.0_069] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 153
| 0
|
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class UpperCAmelCase__ ( snake_case , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ : Any = WavaVecaPhonemeCTCTokenizer
lowerCAmelCase__ : List[str] = False
def _UpperCAmelCase ( self: Any ) -> List[str]:
'''simple docstring'''
super().setUp()
__UpperCAmelCase = (
"<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː "
"ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː "
"ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 "
"oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ "
"pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ "
"yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ "
"əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ "
"ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ "
"ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ "
"uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ "
"ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ "
"ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ "
"ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"
).split(" " )
__UpperCAmelCase = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
__UpperCAmelCase = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}
__UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + "\n" )
def _UpperCAmelCase ( self: List[Any] , __lowerCAmelCase: Tuple , __lowerCAmelCase: Dict=False , __lowerCAmelCase: Union[str, Any]=20 , __lowerCAmelCase: Tuple=5 ) -> Tuple[str, list]:
'''simple docstring'''
__UpperCAmelCase = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=__lowerCAmelCase )) for i in range(len(__lowerCAmelCase ) )]
__UpperCAmelCase = list(filter(lambda __lowerCAmelCase : [t[0]] == tokenizer.encode(t[1] , do_phonemize=__lowerCAmelCase ) , __lowerCAmelCase ) )
if max_length is not None and len(__lowerCAmelCase ) > max_length:
__UpperCAmelCase = toks[:max_length]
if min_length is not None and len(__lowerCAmelCase ) < min_length and len(__lowerCAmelCase ) > 0:
while len(__lowerCAmelCase ) < min_length:
__UpperCAmelCase = toks + toks
# toks_str = [t[1] for t in toks]
__UpperCAmelCase = [t[0] for t in toks]
# Ensure consistency
__UpperCAmelCase = tokenizer.decode(__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase )
if " " not in output_txt and len(__lowerCAmelCase ) > 1:
__UpperCAmelCase = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__lowerCAmelCase )
+ " "
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__lowerCAmelCase )
)
if with_prefix_space:
__UpperCAmelCase = " " + output_txt
__UpperCAmelCase = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
return output_txt, output_ids
def _UpperCAmelCase ( self: str , **__lowerCAmelCase: List[Any] ) -> str:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def _UpperCAmelCase ( self: List[Any] ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
# check adding a single token
tokenizer.add_tokens("xxx" )
__UpperCAmelCase = tokenizer("m xxx ɪ" , do_phonemize=__lowerCAmelCase ).input_ids
self.assertEqual(__lowerCAmelCase , [13, 392, 17] ) # xxx should be last token
tokenizer.add_tokens(["aaa", "bbb", "ccc"] )
__UpperCAmelCase = tokenizer("m aaa ɪ ccc" , do_phonemize=__lowerCAmelCase ).input_ids
self.assertEqual(__lowerCAmelCase , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa
__UpperCAmelCase = tokenizer("maɪ c" , do_phonemize=__lowerCAmelCase ).input_ids
self.assertEqual(__lowerCAmelCase , [3, 200] ) # mai should be <unk> (=3)
def _UpperCAmelCase ( self: int ) -> int:
'''simple docstring'''
__UpperCAmelCase = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
__UpperCAmelCase = "Hello how are you"
__UpperCAmelCase = tokenizer.phonemize(__lowerCAmelCase , phonemizer_lang="en-us" )
self.assertEqual(__lowerCAmelCase , "h ə l oʊ h aʊ ɑːɹ j uː" )
def _UpperCAmelCase ( self: List[str] ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
__UpperCAmelCase = "Hello how are you"
__UpperCAmelCase = tokenizer.phonemize(__lowerCAmelCase , phonemizer_lang="en-us" )
self.assertEqual(tokenizer(__lowerCAmelCase ).input_ids , tokenizer(__lowerCAmelCase , do_phonemize=__lowerCAmelCase ).input_ids )
def _UpperCAmelCase ( self: Any ) -> Any:
'''simple docstring'''
__UpperCAmelCase = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
__UpperCAmelCase = "Hello how are you"
__UpperCAmelCase = tokenizer.phonemize(__lowerCAmelCase , phonemizer_lang="en-us" )
__UpperCAmelCase = tokenizer.decode(tokenizer(__lowerCAmelCase ).input_ids )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def _UpperCAmelCase ( self: str ) -> Any:
'''simple docstring'''
__UpperCAmelCase = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
__UpperCAmelCase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
__UpperCAmelCase = tokenizer.decode(sample_ids[0] )
__UpperCAmelCase = tokenizer.batch_decode(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , batch_tokens[0] )
self.assertEqual(__lowerCAmelCase , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] )
def _UpperCAmelCase ( self: Optional[int] ) -> Dict:
'''simple docstring'''
__UpperCAmelCase = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
__UpperCAmelCase = "Hello how are you"
__UpperCAmelCase = tokenizer.phonemize(__lowerCAmelCase , phonemizer_lang="en-us" )
self.assertEqual(__lowerCAmelCase , "h ə l oʊ | h aʊ | ɑːɹ | j uː |" )
def _UpperCAmelCase ( self: Union[str, Any] ) -> str:
'''simple docstring'''
__UpperCAmelCase = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
__UpperCAmelCase = "Hello how are you"
__UpperCAmelCase = tokenizer.phonemize(__lowerCAmelCase , phonemizer_lang="en-us" )
self.assertEqual(tokenizer(__lowerCAmelCase ).input_ids , tokenizer(__lowerCAmelCase , do_phonemize=__lowerCAmelCase ).input_ids )
def _UpperCAmelCase ( self: List[str] ) -> int:
'''simple docstring'''
__UpperCAmelCase = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
# fmt: off
__UpperCAmelCase = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
__UpperCAmelCase = tokenizer.decode(sample_ids[0] )
__UpperCAmelCase = tokenizer.batch_decode(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , batch_tokens[0] )
self.assertEqual(__lowerCAmelCase , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] )
# decode with no word_del_token filter
__UpperCAmelCase = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=__lowerCAmelCase )
__UpperCAmelCase = tokenizer.batch_decode(__lowerCAmelCase , filter_word_delimiter_token=__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , batch_tokens[0] )
self.assertEqual(__lowerCAmelCase , ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"] )
def _UpperCAmelCase ( self: Any ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
__UpperCAmelCase = "Hello how are you"
__UpperCAmelCase = tokenizer.phonemize(__lowerCAmelCase , phonemizer_lang="en-us" )
__UpperCAmelCase = tokenizer.decode(tokenizer(__lowerCAmelCase ).input_ids , filter_word_delimiter_token=__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def _UpperCAmelCase ( self: Dict ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
__UpperCAmelCase = "Hello how are you"
__UpperCAmelCase = tokenizer.phonemize(__lowerCAmelCase , phonemizer_lang="en-us" )
__UpperCAmelCase = tokenizer.decode(tokenizer(__lowerCAmelCase ).input_ids , filter_word_delimiter_token=__lowerCAmelCase )
self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |" )] ).strip() , __lowerCAmelCase )
def _UpperCAmelCase ( self: Tuple ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token=__lowerCAmelCase )
__UpperCAmelCase = "Hello how are you"
__UpperCAmelCase = tokenizer(__lowerCAmelCase , phonemizer_lang="en-us" ).input_ids
__UpperCAmelCase = tokenizer(__lowerCAmelCase , phonemizer_lang="fr-fr" ).input_ids
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
__UpperCAmelCase = tokenizer.decode(__lowerCAmelCase )
__UpperCAmelCase = tokenizer.decode(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , "h ə l oʊ h aʊ ɑːɹ j uː" )
self.assertEqual(__lowerCAmelCase , "ɛ l o h aʊ a ʁ j u" )
def _UpperCAmelCase ( self: Tuple ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
__UpperCAmelCase = "Hello how Are you"
__UpperCAmelCase = "hello how are you"
__UpperCAmelCase = tokenizer(__lowerCAmelCase ).input_ids
__UpperCAmelCase = tokenizer(__lowerCAmelCase ).input_ids
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def _UpperCAmelCase ( self: List[str] ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
tokenizer.add_tokens(["!", "?"] )
tokenizer.add_special_tokens({"cls_token": "$$$"} )
# fmt: off
__UpperCAmelCase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
__UpperCAmelCase = tokenizer.batch_decode(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"] )
@staticmethod
def _UpperCAmelCase ( __lowerCAmelCase: List[Any] , __lowerCAmelCase: List[str] ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase = [d[key] for d in offsets]
return retrieved_list
def _UpperCAmelCase ( self: int ) -> Dict:
'''simple docstring'''
__UpperCAmelCase = self.get_tokenizer(word_delimiter_token="|" )
tokenizer.add_tokens("|" )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
__UpperCAmelCase = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
__UpperCAmelCase = tokenizer.decode(__lowerCAmelCase , output_char_offsets=__lowerCAmelCase , filter_word_delimiter_token=__lowerCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue("text" in outputs )
self.assertTrue("char_offsets" in outputs )
self.assertTrue(isinstance(__lowerCAmelCase , __lowerCAmelCase ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"] , "char" ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"] , "char" ) , ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"] , "start_offset" ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"] , "end_offset" ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def _UpperCAmelCase ( self: Tuple ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase = self.get_tokenizer(word_delimiter_token="|" )
def check_list_tuples_equal(__lowerCAmelCase: List[Any] , __lowerCAmelCase: Optional[int] ):
self.assertTrue(isinstance(__lowerCAmelCase , __lowerCAmelCase ) )
self.assertTrue(isinstance(outputs_list[0] , __lowerCAmelCase ) )
# transform list to ModelOutput
__UpperCAmelCase = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch["text"] , outputs_batch_a["text"] )
def recursive_check(__lowerCAmelCase: Optional[int] , __lowerCAmelCase: List[Any] ):
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
[recursive_check(__lowerCAmelCase , __lowerCAmelCase ) for la, la in zip(__lowerCAmelCase , __lowerCAmelCase )]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch["char_offsets"] , outputs_batch_a["char_offsets"] )
# fmt: off
__UpperCAmelCase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
__UpperCAmelCase = tokenizer.batch_decode(__lowerCAmelCase , output_char_offsets=__lowerCAmelCase )
__UpperCAmelCase = [tokenizer.decode(__lowerCAmelCase , output_char_offsets=__lowerCAmelCase ) for ids in sample_ids]
check_list_tuples_equal(__lowerCAmelCase , __lowerCAmelCase )
@unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes" )
def _UpperCAmelCase ( self: Dict ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes" )
def _UpperCAmelCase ( self: Dict ) -> int:
'''simple docstring'''
pass
@unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency" )
def _UpperCAmelCase ( self: Union[str, Any] ) -> Any:
'''simple docstring'''
pass
@unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing" )
def _UpperCAmelCase ( self: List[Any] ) -> List[Any]:
'''simple docstring'''
pass
def _UpperCAmelCase ( self: str ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase = tokenizer.vocab_size
__UpperCAmelCase = len(__lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
__UpperCAmelCase = ["aaaaa bbbbbb", "cccccccccdddddddd"]
__UpperCAmelCase = tokenizer.add_tokens(__lowerCAmelCase )
__UpperCAmelCase = tokenizer.vocab_size
__UpperCAmelCase = len(__lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase , 0 )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , len(__lowerCAmelCase ) )
self.assertEqual(__lowerCAmelCase , all_size + len(__lowerCAmelCase ) )
__UpperCAmelCase = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=__lowerCAmelCase )
self.assertGreaterEqual(len(__lowerCAmelCase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
__UpperCAmelCase = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
__UpperCAmelCase = tokenizer.add_special_tokens(__lowerCAmelCase )
__UpperCAmelCase = tokenizer.vocab_size
__UpperCAmelCase = len(__lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase , 0 )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , len(__lowerCAmelCase ) )
self.assertEqual(__lowerCAmelCase , all_size_a + len(__lowerCAmelCase ) )
__UpperCAmelCase = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=__lowerCAmelCase )
self.assertGreaterEqual(len(__lowerCAmelCase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." )
def _UpperCAmelCase ( self: Any ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." )
def _UpperCAmelCase ( self: str ) -> Optional[Any]:
'''simple docstring'''
pass
def _UpperCAmelCase ( self: Dict ) -> Dict:
'''simple docstring'''
__UpperCAmelCase = self.get_tokenizers(fast=__lowerCAmelCase , do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
__UpperCAmelCase = tokenizer.convert_tokens_to_string(__lowerCAmelCase )
self.assertIsInstance(output["text"] , __lowerCAmelCase )
| 286
|
from ..utils import DummyObject, requires_backends
class UpperCAmelCase__ ( metaclass=snake_case ):
"""simple docstring"""
lowerCAmelCase__ : List[str] = ['transformers', 'torch', 'note_seq']
def __init__( self: List[str] , *__lowerCAmelCase: Optional[int] , **__lowerCAmelCase: List[Any] ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["transformers", "torch", "note_seq"] )
@classmethod
def _UpperCAmelCase ( cls: Optional[int] , *__lowerCAmelCase: Any , **__lowerCAmelCase: List[str] ) -> Dict:
'''simple docstring'''
requires_backends(cls , ["transformers", "torch", "note_seq"] )
@classmethod
def _UpperCAmelCase ( cls: Union[str, Any] , *__lowerCAmelCase: Optional[Any] , **__lowerCAmelCase: Optional[Any] ) -> Any:
'''simple docstring'''
requires_backends(cls , ["transformers", "torch", "note_seq"] )
| 286
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"""studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""",
"""studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""",
}
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = 'luke'
def __init__( self : Optional[int] , UpperCAmelCase__ : Optional[Any]=50267 , UpperCAmelCase__ : List[Any]=500000 , UpperCAmelCase__ : Tuple=768 , UpperCAmelCase__ : Dict=256 , UpperCAmelCase__ : List[str]=12 , UpperCAmelCase__ : List[Any]=12 , UpperCAmelCase__ : List[str]=3072 , UpperCAmelCase__ : Union[str, Any]="gelu" , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Tuple=512 , UpperCAmelCase__ : Tuple=2 , UpperCAmelCase__ : str=0.02 , UpperCAmelCase__ : List[str]=1E-12 , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : List[str]=1 , UpperCAmelCase__ : Dict=0 , UpperCAmelCase__ : List[str]=2 , **UpperCAmelCase__ : Union[str, Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
lowercase : List[Any] =vocab_size
lowercase : Optional[Any] =entity_vocab_size
lowercase : Dict =hidden_size
lowercase : List[str] =entity_emb_size
lowercase : str =num_hidden_layers
lowercase : Dict =num_attention_heads
lowercase : Any =hidden_act
lowercase : str =intermediate_size
lowercase : Any =hidden_dropout_prob
lowercase : Dict =attention_probs_dropout_prob
lowercase : List[str] =max_position_embeddings
lowercase : Union[str, Any] =type_vocab_size
lowercase : Union[str, Any] =initializer_range
lowercase : int =layer_norm_eps
lowercase : str =use_entity_aware_attention
lowercase : Any =classifier_dropout
| 92
|
'''simple docstring'''
def _lowerCAmelCase ( __magic_name__ : int = 600851475143 ) -> int:
try:
lowercase : Any =int(__magic_name__ )
except (TypeError, ValueError):
raise TypeError('''Parameter n must be int or castable to int.''' )
if n <= 0:
raise ValueError('''Parameter n must be greater than or equal to one.''' )
lowercase : Optional[Any] =2
lowercase : Dict =0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
lowercase : Union[str, Any] =i
while n % i == 0:
lowercase : Optional[int] =n // i
i += 1
return int(__magic_name__ )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 92
| 1
|
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def _lowercase ( UpperCamelCase__ : int, UpperCamelCase__ : Dict=False ):
__A : Tuple = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""module.blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""module.blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""module.blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""module.blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""module.blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""module.blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('module.cls_token', 'vit.embeddings.cls_token'),
('module.patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('module.patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('module.pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('module.norm.weight', 'layernorm.weight'),
('module.norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
__A : Dict = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def _lowercase ( UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : int, UpperCamelCase__ : List[str]=False ):
for i in range(config.num_hidden_layers ):
if base_model:
__A : Dict = ""
else:
__A : Union[str, Any] = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__A : Any = state_dict.pop(f"""module.blocks.{i}.attn.qkv.weight""" )
__A : List[str] = state_dict.pop(f"""module.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
__A : Tuple = in_proj_weight[
: config.hidden_size, :
]
__A : str = in_proj_bias[: config.hidden_size]
__A : Optional[int] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__A : Optional[int] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__A : Optional[int] = in_proj_weight[
-config.hidden_size :, :
]
__A : Union[str, Any] = in_proj_bias[-config.hidden_size :]
def _lowercase ( UpperCamelCase__ : List[str] ):
__A : Dict = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase, _lowerCamelCase )
def _lowercase ( UpperCamelCase__ : str ):
__A : List[Any] = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase, _lowerCamelCase )
def _lowercase ( UpperCamelCase__ : Optional[int], UpperCamelCase__ : List[str], UpperCamelCase__ : int ):
__A : List[Any] = dct.pop(_lowerCamelCase )
__A : Union[str, Any] = val
def _lowercase ( UpperCamelCase__ : List[str], UpperCamelCase__ : Optional[int] ):
__A : List[str] = ViTMSNConfig()
__A : Any = 1000
__A : Optional[int] = "datasets/huggingface/label-files"
__A : Any = "imagenet-1k-id2label.json"
__A : Any = json.load(open(hf_hub_download(_lowerCamelCase, _lowerCamelCase ), 'r' ) )
__A : Optional[Any] = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
__A : Dict = idalabel
__A : List[str] = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
__A : Optional[int] = 384
__A : Optional[Any] = 1536
__A : List[Any] = 6
elif "l16" in checkpoint_url:
__A : str = 1024
__A : str = 4096
__A : Dict = 24
__A : Optional[int] = 16
__A : Optional[Any] = 0.1
elif "b4" in checkpoint_url:
__A : Optional[int] = 4
elif "l7" in checkpoint_url:
__A : int = 7
__A : Optional[Any] = 1024
__A : List[Any] = 4096
__A : Optional[Any] = 24
__A : Tuple = 16
__A : int = 0.1
__A : str = ViTMSNModel(_lowerCamelCase )
__A : List[str] = torch.hub.load_state_dict_from_url(_lowerCamelCase, map_location='cpu' )["target_encoder"]
__A : Union[str, Any] = ViTImageProcessor(size=config.image_size )
remove_projection_head(_lowerCamelCase )
__A : str = create_rename_keys(_lowerCamelCase, base_model=_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase )
read_in_q_k_v(_lowerCamelCase, _lowerCamelCase, base_model=_lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
model.eval()
__A : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg"
__A : Tuple = Image.open(requests.get(_lowerCamelCase, stream=_lowerCamelCase ).raw )
__A : Dict = ViTImageProcessor(
size=config.image_size, image_mean=_lowerCamelCase, image_std=_lowerCamelCase )
__A : str = image_processor(images=_lowerCamelCase, return_tensors='pt' )
# forward pass
torch.manual_seed(2 )
__A : Tuple = model(**_lowerCamelCase )
__A : Any = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
__A : Optional[Any] = torch.tensor([[-1.0915, -1.4876, -1.1809]] )
elif "b16" in checkpoint_url:
__A : Dict = torch.tensor([[14.2889, -18.9045, 11.7281]] )
elif "l16" in checkpoint_url:
__A : Union[str, Any] = torch.tensor([[41.5028, -22.8681, 45.6475]] )
elif "b4" in checkpoint_url:
__A : int = torch.tensor([[-4.3868, 5.2932, -0.4137]] )
else:
__A : Optional[Any] = torch.tensor([[-0.1792, -0.6465, 2.4263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3], _lowerCamelCase, atol=1E-4 )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCamelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
UpperCAmelCase_ : Dict = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 704
|
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class _lowerCamelCase :
'''simple docstring'''
def __init__( self , __lowercase , __lowercase=13 , __lowercase=7 , __lowercase=True , __lowercase=True , __lowercase=False , __lowercase=True , __lowercase=99 , __lowercase=32 , __lowercase=5 , __lowercase=4 , __lowercase=37 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=512 , __lowercase=16 , __lowercase=2 , __lowercase=0.0_2 , __lowercase=3 , __lowercase=4 , __lowercase=None , ):
"""simple docstring"""
__A : Optional[int] = parent
__A : Tuple = batch_size
__A : Optional[int] = seq_length
__A : Tuple = is_training
__A : Optional[Any] = use_input_mask
__A : Optional[Any] = use_token_type_ids
__A : Optional[int] = use_labels
__A : str = vocab_size
__A : Dict = hidden_size
__A : Tuple = num_hidden_layers
__A : Optional[int] = num_attention_heads
__A : str = intermediate_size
__A : List[Any] = hidden_act
__A : List[str] = hidden_dropout_prob
__A : int = attention_probs_dropout_prob
__A : int = max_position_embeddings
__A : int = type_vocab_size
__A : int = type_sequence_label_size
__A : str = initializer_range
__A : str = num_labels
__A : str = num_choices
__A : Any = scope
def snake_case__ ( self ):
"""simple docstring"""
__A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__A : Union[str, Any] = None
if self.use_input_mask:
__A : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
__A : str = None
if self.use_token_type_ids:
__A : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__A : Union[str, Any] = None
__A : Optional[int] = None
__A : List[str] = None
if self.use_labels:
__A : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__A : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
__A : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self ):
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowercase , initializer_range=self.initializer_range , use_stable_embedding=__lowercase , )
def snake_case__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ):
"""simple docstring"""
__A : List[str] = OpenLlamaModel(config=__lowercase )
model.to(__lowercase )
model.eval()
__A : Any = model(__lowercase , attention_mask=__lowercase )
__A : Dict = model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ):
"""simple docstring"""
__A : List[str] = True
__A : int = OpenLlamaModel(__lowercase )
model.to(__lowercase )
model.eval()
__A : List[Any] = model(
__lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , )
__A : Optional[Any] = model(
__lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , )
__A : Optional[Any] = model(__lowercase , attention_mask=__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ):
"""simple docstring"""
__A : Dict = OpenLlamaForCausalLM(config=__lowercase )
model.to(__lowercase )
model.eval()
__A : Optional[Any] = model(__lowercase , attention_mask=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ):
"""simple docstring"""
__A : List[Any] = True
__A : Optional[int] = True
__A : Dict = OpenLlamaForCausalLM(config=__lowercase )
model.to(__lowercase )
model.eval()
# first forward pass
__A : List[str] = model(
__lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , use_cache=__lowercase , )
__A : Optional[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__A : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size )
__A : Optional[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__A : int = torch.cat([input_ids, next_tokens] , dim=-1 )
__A : Dict = torch.cat([input_mask, next_mask] , dim=-1 )
__A : Tuple = model(
__lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , output_hidden_states=__lowercase , )['hidden_states'][0]
__A : Any = model(
__lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , past_key_values=__lowercase , output_hidden_states=__lowercase , )['hidden_states'][0]
# select random slice
__A : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__A : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
__A : int = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__lowercase , __lowercase , atol=1E-3 ) )
def snake_case__ ( self ):
"""simple docstring"""
__A : int = self.prepare_config_and_inputs()
(
(
__A
) ,(
__A
) ,(
__A
) ,(
__A
) ,(
__A
) ,(
__A
) ,(
__A
) ,
) : Optional[Any] = config_and_inputs
__A : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _lowerCamelCase ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
__lowercase : List[str] = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__lowercase : List[Any] = (OpenLlamaForCausalLM,) if is_torch_available() else ()
__lowercase : Optional[int] = (
{
'''feature-extraction''': OpenLlamaModel,
'''text-classification''': OpenLlamaForSequenceClassification,
'''text-generation''': OpenLlamaForCausalLM,
'''zero-shot''': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowercase : Union[str, Any] = False
__lowercase : Union[str, Any] = False
def snake_case__ ( self ):
"""simple docstring"""
__A : Optional[int] = OpenLlamaModelTester(self )
__A : List[str] = ConfigTester(self , config_class=__lowercase , hidden_size=37 )
def snake_case__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case__ ( self ):
"""simple docstring"""
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def snake_case__ ( self ):
"""simple docstring"""
__A : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__A : List[str] = type
self.model_tester.create_and_check_model(*__lowercase )
def snake_case__ ( self ):
"""simple docstring"""
__A ,__A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__A : Union[str, Any] = 3
__A : int = input_dict['input_ids']
__A : int = input_ids.ne(1 ).to(__lowercase )
__A : Optional[int] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__A : Optional[Any] = OpenLlamaForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
__A : Optional[int] = model(__lowercase , attention_mask=__lowercase , labels=__lowercase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case__ ( self ):
"""simple docstring"""
__A ,__A : str = self.model_tester.prepare_config_and_inputs_for_common()
__A : Any = 3
__A : List[str] = 'single_label_classification'
__A : Dict = input_dict['input_ids']
__A : Dict = input_ids.ne(1 ).to(__lowercase )
__A : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__A : Union[str, Any] = OpenLlamaForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
__A : Dict = model(__lowercase , attention_mask=__lowercase , labels=__lowercase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case__ ( self ):
"""simple docstring"""
__A ,__A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__A : str = 3
__A : int = 'multi_label_classification'
__A : Union[str, Any] = input_dict['input_ids']
__A : str = input_ids.ne(1 ).to(__lowercase )
__A : Union[str, Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__A : int = OpenLlamaForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
__A : Any = model(__lowercase , attention_mask=__lowercase , labels=__lowercase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def snake_case__ ( self ):
"""simple docstring"""
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def snake_case__ ( self , __lowercase ):
"""simple docstring"""
__A ,__A : str = self.model_tester.prepare_config_and_inputs_for_common()
__A : List[Any] = ids_tensor([1, 10] , config.vocab_size )
__A : str = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__A : Union[str, Any] = OpenLlamaModel(__lowercase )
original_model.to(__lowercase )
original_model.eval()
__A : Optional[int] = original_model(__lowercase ).last_hidden_state
__A : int = original_model(__lowercase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__A : List[Any] = {'type': scaling_type, 'factor': 1_0.0}
__A : str = OpenLlamaModel(__lowercase )
scaled_model.to(__lowercase )
scaled_model.eval()
__A : Dict = scaled_model(__lowercase ).last_hidden_state
__A : List[str] = scaled_model(__lowercase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__lowercase , __lowercase , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(__lowercase , __lowercase , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__lowercase , __lowercase , atol=1E-5 ) )
| 540
| 0
|
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_lowercase = logging.get_logger(__name__)
_lowercase = {"""vocab_file""": """spiece.model"""}
_lowercase = {
"""vocab_file""": {
"""AI-Sweden/gpt-sw3-126m""": """https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-350m""": """https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-1.6b""": """https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-6.7b""": """https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-20b""": """https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model""",
}
}
_lowercase = {
"""AI-Sweden/gpt-sw3-126m""": 2048,
"""AI-Sweden/gpt-sw3-350m""": 2048,
"""AI-Sweden/gpt-sw3-1.6b""": 2048,
"""AI-Sweden/gpt-sw3-6.7b""": 2048,
"""AI-Sweden/gpt-sw3-20b""": 2048,
}
class lowercase_ ( A ):
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = ["input_ids", "attention_mask"]
def __init__( self , __A , __A=False , __A=False , __A=False , __A=None , __A=None , __A=None , __A=None , __A = None , **__A , ) -> None:
SCREAMING_SNAKE_CASE_ : str ={} if sp_model_kwargs is None else sp_model_kwargs
SCREAMING_SNAKE_CASE_ : List[str] =kwargs.get('''name_or_path''' )
if name_or_path is None:
logger.warning(
'''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'''
''' you are testing the model, this can safely be ignored''' )
SCREAMING_SNAKE_CASE_ : Tuple ='''None'''
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
SCREAMING_SNAKE_CASE_ : List[Any] ='''<|endoftext|>''' if eos_token is None else eos_token
SCREAMING_SNAKE_CASE_ : List[str] ='''<unk>''' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
SCREAMING_SNAKE_CASE_ : List[Any] =unk_token if pad_token is None else pad_token
SCREAMING_SNAKE_CASE_ : Optional[Any] =eos_token if bos_token is None else bos_token
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] ='''<pad>''' if pad_token is None else pad_token
SCREAMING_SNAKE_CASE_ : List[Any] ='''<s>''' if bos_token is None else bos_token
super().__init__(
do_lower_case=__A , remove_space=__A , keep_accents=__A , bos_token=__A , eos_token=__A , unk_token=__A , pad_token=__A , sp_model_kwargs=self.sp_model_kwargs , **__A , )
SCREAMING_SNAKE_CASE_ : str =do_lower_case
SCREAMING_SNAKE_CASE_ : str =remove_space
SCREAMING_SNAKE_CASE_ : Optional[int] =keep_accents
SCREAMING_SNAKE_CASE_ : Any =vocab_file
SCREAMING_SNAKE_CASE_ : Optional[Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__A )
# Used for whitespace normalization in input texts
# fmt : off
SCREAMING_SNAKE_CASE_ : Tuple ={''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', ''''''}
# fmt : on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
SCREAMING_SNAKE_CASE_ : List[str] =re.compile(
F'[{"".join(map(__A , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8_203] ) )}]' )
def __getstate__( self ) -> str:
SCREAMING_SNAKE_CASE_ : List[Any] =self.__dict__.copy()
SCREAMING_SNAKE_CASE_ : Tuple =None
return state
def __setstate__( self , __A ) -> int:
SCREAMING_SNAKE_CASE_ : Optional[Any] =d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
SCREAMING_SNAKE_CASE_ : List[str] ={}
SCREAMING_SNAKE_CASE_ : int =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def _snake_case ( self ) -> int:
return len(self.sp_model )
def _snake_case ( self , __A ) -> str:
SCREAMING_SNAKE_CASE_ : Dict =self.non_printing_characters_re.sub('''''' , __A )
# Normalize whitespaces
SCREAMING_SNAKE_CASE_ : Union[str, Any] =''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] )
# NFC Unicode normalization
SCREAMING_SNAKE_CASE_ : Dict =unicodedata.normalize('''NFC''' , __A )
return text
def _snake_case ( self , __A , **__A ) -> List[str]:
SCREAMING_SNAKE_CASE_ : Tuple =self.preprocess_text(__A )
return self.sp_model.encode(__A , out_type=__A )
def _snake_case ( self , __A ) -> int:
return self.sp_model.PieceToId(__A )
def _snake_case ( self , __A ) -> str:
return self.sp_model.IdToPiece(__A )
@staticmethod
def _snake_case ( __A ) -> str:
return out_string
def _snake_case ( self , __A ) -> str:
SCREAMING_SNAKE_CASE_ : Optional[Any] =[]
SCREAMING_SNAKE_CASE_ : Any =''''''
SCREAMING_SNAKE_CASE_ : Any =False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__A ) + token
SCREAMING_SNAKE_CASE_ : Optional[Any] =True
SCREAMING_SNAKE_CASE_ : Dict =[]
else:
current_sub_tokens.append(__A )
SCREAMING_SNAKE_CASE_ : List[Any] =False
out_string += self.sp_model.decode(__A )
return out_string
def _snake_case ( self ) -> Dict[str, int]:
SCREAMING_SNAKE_CASE_ : Optional[int] ={self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _snake_case ( self , __A , __A = None ) -> Tuple[str]:
if not os.path.isdir(__A ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
SCREAMING_SNAKE_CASE_ : Tuple =os.path.join(
__A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __A )
elif not os.path.isfile(self.vocab_file ):
with open(__A , '''wb''' ) as fi:
SCREAMING_SNAKE_CASE_ : Dict =self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
def _snake_case ( self , __A , __A = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
if isinstance(__A , __A ):
SCREAMING_SNAKE_CASE_ : Dict =self.preprocess_text(__A )
SCREAMING_SNAKE_CASE_ : str =self.sp_model.encode(__A )
else:
SCREAMING_SNAKE_CASE_ : List[Any] =[self.preprocess_text(__A ) for t in text]
SCREAMING_SNAKE_CASE_ : int =self.sp_model.encode(__A )
if return_tensors is True or return_tensors == "pt":
SCREAMING_SNAKE_CASE_ : Union[str, Any] =torch.tensor(__A )
return token_ids
def _snake_case ( self , __A ) -> str:
return self.sp_model.decode(__A )
def _snake_case ( self , __A ) -> List[int]:
SCREAMING_SNAKE_CASE_ : Optional[int] =[F'User: {text}' if is_user else F'Bot: {text}' for is_user, text in conversation.iter_texts()]
SCREAMING_SNAKE_CASE_ : List[str] =(
F'{self.eos_token}{self.bos_token}' + F'{self.bos_token}'.join(__A ) + F'{self.bos_token}Bot:'
)
return self.encode(text=__A )
| 443
|
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : int | float | str ) -> tuple[int, int]:
try:
SCREAMING_SNAKE_CASE_ : int =float(UpperCAmelCase_ )
except ValueError:
raise ValueError('''Please enter a valid number''' )
SCREAMING_SNAKE_CASE_ : Any =decimal - int(UpperCAmelCase_ )
if fractional_part == 0:
return int(UpperCAmelCase_ ), 1
else:
SCREAMING_SNAKE_CASE_ : Any =len(str(UpperCAmelCase_ ).split('''.''' )[1] )
SCREAMING_SNAKE_CASE_ : str =int(decimal * (1_0**number_of_frac_digits) )
SCREAMING_SNAKE_CASE_ : Any =1_0**number_of_frac_digits
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple =denominator, numerator
while True:
SCREAMING_SNAKE_CASE_ : Any =dividend % divisor
if remainder == 0:
break
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] =divisor, remainder
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] =numerator / divisor, denominator / divisor
return int(UpperCAmelCase_ ), int(UpperCAmelCase_ )
if __name__ == "__main__":
print(F"{decimal_to_fraction(2) = }")
print(F"{decimal_to_fraction(89.0) = }")
print(F"{decimal_to_fraction('67') = }")
print(F"{decimal_to_fraction('45.0') = }")
print(F"{decimal_to_fraction(1.5) = }")
print(F"{decimal_to_fraction('6.25') = }")
print(F"{decimal_to_fraction('78td') = }")
| 443
| 1
|
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
__UpperCAmelCase = logging.getLogger(__name__)
def A_ ( ) ->Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = argparse.ArgumentParser(
description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.' )
parser.add_argument(
'--dataset_name' , type=lowercase_ , default='wikitext' , help='Name of the training. Explore datasets at: hf.co/datasets.' , )
parser.add_argument(
'--dataset_config' , type=lowercase_ , default='wikitext-103-raw-v1' , help='Configuration name of the dataset.' )
parser.add_argument(
'--tokenizer_name_or_path' , type=lowercase_ , default='sayakpaul/unigram-tokenizer-wikitext' , help='Tokenizer identifier. Can be a local filepath or a Hub identifier.' , )
parser.add_argument(
'--shard_size' , type=lowercase_ , default=1_0_0_0 , help='Number of entries to go in a single shard.' , )
parser.add_argument('--split' , type=lowercase_ , default='train' , choices=['train', 'test', 'validation'] )
parser.add_argument(
'--limit' , default=lowercase_ , type=lowercase_ , help='Limit the number of shards (used for debugging).' , )
parser.add_argument(
'--max_length' , type=lowercase_ , default=5_1_2 , help='Maximum sequence length. For training on TPUs, it helps to have a maximum'
' sequence length that is a multiple of 8.' , )
parser.add_argument(
'--output_dir' , default='tf-tpu' , type=lowercase_ , help='Output directory where the TFRecord shards will be saved. If the'
' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'
' shards will be directly saved to a Google Cloud Storage bucket.' , )
SCREAMING_SNAKE_CASE = parser.parse_args()
return args
def A_ ( lowercase_ ) ->Optional[Any]:
"""simple docstring"""
def fn(lowercase_ ):
return tokenizer(examples['text'] )
return fn
def A_ ( lowercase_ ) ->List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = []
for i in range(len(tokenized_data['input_ids'] ) ):
SCREAMING_SNAKE_CASE = {
'input_ids': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['input_ids'][i] ) ),
'attention_mask': tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data['attention_mask'][i] ) ),
}
SCREAMING_SNAKE_CASE = tf.train.Features(feature=lowercase_ )
SCREAMING_SNAKE_CASE = tf.train.Example(features=lowercase_ )
SCREAMING_SNAKE_CASE = example.SerializeToString()
records.append(lowercase_ )
return records
def A_ ( lowercase_ ) ->Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
SCREAMING_SNAKE_CASE = min(len(lowercase_ ) , args.limit )
SCREAMING_SNAKE_CASE = dataset.select(range(lowercase_ ) )
print(f'''Limiting the dataset to {args.limit} entries.''' )
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
SCREAMING_SNAKE_CASE = os.path.join(args.output_dir , args.split )
if not os.path.exists(lowercase_ ):
os.makedirs(lowercase_ )
else:
SCREAMING_SNAKE_CASE = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
SCREAMING_SNAKE_CASE = tokenize_function(lowercase_ )
SCREAMING_SNAKE_CASE = dataset.map(lowercase_ , batched=lowercase_ , num_proc=4 , remove_columns=['text'] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(lowercase_ ):
# Concatenate all texts.
SCREAMING_SNAKE_CASE = {k: sum(examples[k] , [] ) for k in examples.keys()}
SCREAMING_SNAKE_CASE = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
SCREAMING_SNAKE_CASE = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
SCREAMING_SNAKE_CASE = {
k: [t[i : i + args.max_length] for i in range(0 , lowercase_ , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
SCREAMING_SNAKE_CASE = dataset_tokenized.map(lowercase_ , batched=lowercase_ , batch_size=1_0_0_0 , num_proc=4 )
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
for shard in range(0 , len(lowercase_ ) , args.shard_size ):
SCREAMING_SNAKE_CASE = grouped_dataset[shard : shard + args.shard_size]
SCREAMING_SNAKE_CASE = len(dataset_snapshot['input_ids'] )
SCREAMING_SNAKE_CASE = os.path.join(lowercase_ , f'''dataset-{shard_count}-{records_containing}.tfrecord''' )
SCREAMING_SNAKE_CASE = get_serialized_examples(lowercase_ )
with tf.io.TFRecordWriter(lowercase_ ) as out_file:
for i in range(len(lowercase_ ) ):
SCREAMING_SNAKE_CASE = serialized_examples[i]
out_file.write(lowercase_ )
print('Wrote file {} containing {} records'.format(lowercase_ , lowercase_ ) )
shard_count += 1
total_records += records_containing
with open(f'''split-{args.split}-records-count.txt''' , 'w' ) as f:
print(f'''Total {args.split} records: {total_records}''' , file=lowercase_ )
if __name__ == "__main__":
__UpperCAmelCase = parse_args()
main(args)
| 259
|
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def A_ ( lowercase_ , lowercase_ ) ->Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = args.log_outputs
SCREAMING_SNAKE_CASE = '_'.join(args.dataset.split('/' ) + [args.config, args.split] )
# load metric
SCREAMING_SNAKE_CASE = load_metric('wer' )
SCREAMING_SNAKE_CASE = load_metric('cer' )
# compute metrics
SCREAMING_SNAKE_CASE = wer.compute(references=result['target'] , predictions=result['prediction'] )
SCREAMING_SNAKE_CASE = cer.compute(references=result['target'] , predictions=result['prediction'] )
# print & log results
SCREAMING_SNAKE_CASE = f'''WER: {wer_result}\nCER: {cer_result}'''
print(lowercase_ )
with open(f'''{dataset_id}_eval_results.txt''' , 'w' ) as f:
f.write(lowercase_ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
SCREAMING_SNAKE_CASE = f'''log_{dataset_id}_predictions.txt'''
SCREAMING_SNAKE_CASE = f'''log_{dataset_id}_targets.txt'''
with open(lowercase_ , 'w' ) as p, open(lowercase_ , 'w' ) as t:
# mapping function to write output
def write_to_file(lowercase_ , lowercase_ ):
p.write(f'''{i}''' + '\n' )
p.write(batch['prediction'] + '\n' )
t.write(f'''{i}''' + '\n' )
t.write(batch['target'] + '\n' )
result.map(lowercase_ , with_indices=lowercase_ )
def A_ ( lowercase_ ) ->str:
"""simple docstring"""
SCREAMING_SNAKE_CASE = '[,?.!\-\;\:"“%‘”�—’…–]' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
SCREAMING_SNAKE_CASE = re.sub(lowercase_ , '' , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
SCREAMING_SNAKE_CASE = ['\n\n', '\n', ' ', ' ']
for t in token_sequences_to_ignore:
SCREAMING_SNAKE_CASE = ' '.join(text.split(lowercase_ ) )
return text
def A_ ( lowercase_ ) ->Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=lowercase_ )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained(args.model_id )
SCREAMING_SNAKE_CASE = feature_extractor.sampling_rate
# resample audio
SCREAMING_SNAKE_CASE = dataset.cast_column('audio' , Audio(sampling_rate=lowercase_ ) )
# load eval pipeline
if args.device is None:
SCREAMING_SNAKE_CASE = 0 if torch.cuda.is_available() else -1
SCREAMING_SNAKE_CASE = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(lowercase_ ):
SCREAMING_SNAKE_CASE = asr(
batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
SCREAMING_SNAKE_CASE = prediction['text']
SCREAMING_SNAKE_CASE = normalize_text(batch['sentence'] )
return batch
# run inference on all examples
SCREAMING_SNAKE_CASE = dataset.map(lowercase_ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(lowercase_ , lowercase_ )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
)
parser.add_argument(
"--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
)
parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
parser.add_argument(
"--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
)
parser.add_argument(
"--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
)
parser.add_argument(
"--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
)
parser.add_argument(
"--device",
type=int,
default=None,
help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
)
__UpperCAmelCase = parser.parse_args()
main(args)
| 259
| 1
|
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def _a ( lowercase__ : int ):
'''simple docstring'''
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def _a ( ):
'''simple docstring'''
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = "mock-s3-bucket"
SCREAMING_SNAKE_CASE__ : Tuple = f'''s3://{mock_bucket}'''
SCREAMING_SNAKE_CASE__ : str = extract_path_from_uri(lowerCamelCase__ )
assert dataset_path.startswith('s3://' ) is False
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "./local/path"
SCREAMING_SNAKE_CASE__ : List[Any] = extract_path_from_uri(lowerCamelCase__ )
assert dataset_path == new_dataset_path
def _a ( lowercase__ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = is_remote_filesystem(lowerCamelCase__ )
assert is_remote is True
SCREAMING_SNAKE_CASE__ : int = fsspec.filesystem('file' )
SCREAMING_SNAKE_CASE__ : Dict = is_remote_filesystem(lowerCamelCase__ )
assert is_remote is False
@pytest.mark.parametrize('compression_fs_class' , lowerCamelCase__ )
def _a ( lowercase__ : Union[str, Any] , lowercase__ : Union[str, Any] , lowercase__ : List[str] , lowercase__ : str , lowercase__ : Any , lowercase__ : Dict , lowercase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file}
SCREAMING_SNAKE_CASE__ : Dict = input_paths[compression_fs_class.protocol]
if input_path is None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = f'''for \'{compression_fs_class.protocol}\' compression protocol, '''
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(lowerCamelCase__ )
SCREAMING_SNAKE_CASE__ : List[str] = fsspec.filesystem(compression_fs_class.protocol , fo=lowerCamelCase__ )
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.basename(lowerCamelCase__ )
SCREAMING_SNAKE_CASE__ : Tuple = expected_filename[: expected_filename.rindex('.' )]
assert fs.glob('*' ) == [expected_filename]
with fs.open(lowerCamelCase__ , 'r' , encoding='utf-8' ) as f, open(lowerCamelCase__ , encoding='utf-8' ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize('protocol' , ['zip', 'gzip'] )
def _a ( lowercase__ : Optional[int] , lowercase__ : int , lowercase__ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
SCREAMING_SNAKE_CASE__ : Dict = compressed_file_paths[protocol]
SCREAMING_SNAKE_CASE__ : Optional[Any] = "dataset.jsonl"
SCREAMING_SNAKE_CASE__ : List[Any] = f'''{protocol}://{member_file_path}::{compressed_file_path}'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = fsspec.get_fs_token_paths(lowerCamelCase__ )
assert fs.isfile(lowerCamelCase__ )
assert not fs.isfile('non_existing_' + member_file_path )
@pytest.mark.integration
def _a ( lowercase__ : List[Any] , lowercase__ : int , lowercase__ : List[str] , lowercase__ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = hf_api.dataset_info(lowerCamelCase__ , token=lowerCamelCase__ )
SCREAMING_SNAKE_CASE__ : Dict = HfFileSystem(repo_info=lowerCamelCase__ , token=lowerCamelCase__ )
assert sorted(hffs.glob('*' ) ) == [".gitattributes", "data"]
assert hffs.isdir('data' )
assert hffs.isfile('.gitattributes' ) and hffs.isfile('data/text_data.txt' )
with open(lowerCamelCase__ ) as f:
assert hffs.open('data/text_data.txt' , 'r' ).read() == f.read()
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = "bz2"
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(lowerCamelCase__ , lowerCamelCase__ , clobber=lowerCamelCase__ )
with pytest.warns(lowerCamelCase__ ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(lowerCamelCase__ ) == 1
assert (
str(warning_info[0].message )
== f'''A filesystem protocol was already set for {protocol} and will be overwritten.'''
)
| 85
|
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class __A (__magic_name__ ):
def _snake_case ( self ):
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def _snake_case ( self ):
__UpperCAmelCase : int = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
return Dataset.from_dict(UpperCamelCase_ )
def _snake_case ( self ):
__UpperCAmelCase : Optional[Any] = self._create_example_records()
__UpperCAmelCase : Union[str, Any] = Dataset.from_list(UpperCamelCase_ )
self.assertListEqual(dset.column_names , ["col_1", "col_2"] )
for i, r in enumerate(UpperCamelCase_ ):
self.assertDictEqual(UpperCamelCase_ , example_records[i] )
def _snake_case ( self ):
__UpperCAmelCase : Optional[Any] = self._create_example_records()
__UpperCAmelCase : Optional[Any] = Dataset.from_list(UpperCamelCase_ )
__UpperCAmelCase : Any = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def _snake_case ( self ): # checks what happens with missing columns
__UpperCAmelCase : Optional[int] = [{"col_1": 1}, {"col_2": "x"}]
__UpperCAmelCase : Optional[int] = Dataset.from_list(UpperCamelCase_ )
self.assertDictEqual(dset[0] , {"col_1": 1} )
self.assertDictEqual(dset[1] , {"col_1": None} ) # NB: first record is used for columns
def _snake_case ( self ): # checks if the type can be inferred from the second record
__UpperCAmelCase : Dict = [{"col_1": []}, {"col_1": [1, 2]}]
__UpperCAmelCase : Optional[int] = Dataset.from_list(UpperCamelCase_ )
self.assertEqual(dset.info.features["col_1"] , Sequence(Value("int64" ) ) )
def _snake_case ( self ):
__UpperCAmelCase : Optional[Any] = Dataset.from_list([] )
self.assertEqual(len(UpperCamelCase_ ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 168
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case : str = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Tuple = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : List[Any] = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Dict = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : int = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
_snake_case : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 203
|
import unittest
import numpy as np
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = None , ):
__snake_case : List[str] = np.shape(__lowerCamelCase )
__snake_case : Optional[Any] = np.shape(__lowerCamelCase )
__snake_case : List[str] = np.shape(__lowerCamelCase )
if shape_a[0] != shape_b[0]:
__snake_case : Any = (
"Expected the same number of rows for A and B. "
F'Instead found A of size {shape_a} and B of size {shape_b}'
)
raise ValueError(__lowerCamelCase )
if shape_b[1] != shape_c[1]:
__snake_case : int = (
"Expected the same number of columns for B and C. "
F'Instead found B of size {shape_b} and C of size {shape_c}'
)
raise ValueError(__lowerCamelCase )
__snake_case : str = pseudo_inv
if a_inv is None:
try:
__snake_case : Optional[Any] = np.linalg.inv(__lowerCamelCase )
except np.linalg.LinAlgError:
raise ValueError(
"Input matrix A is not invertible. Cannot compute Schur complement." )
return mat_c - mat_b.T @ a_inv @ mat_b
class a (unittest.TestCase ):
"""simple docstring"""
def __snake_case ( self : Tuple ) -> None:
__snake_case : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
__snake_case : str = np.array([[0, 3], [3, 0], [2, 3]] )
__snake_case : Dict = np.array([[2, 1], [6, 3]] )
__snake_case : Dict = schur_complement(lowerCamelCase , lowerCamelCase , lowerCamelCase )
__snake_case : int = np.block([[a, b], [b.T, c]] )
__snake_case : Optional[int] = np.linalg.det(lowerCamelCase )
__snake_case : Any = np.linalg.det(lowerCamelCase )
__snake_case : Tuple = np.linalg.det(lowerCamelCase )
self.assertAlmostEqual(lowerCamelCase , det_a * det_s )
def __snake_case ( self : int ) -> None:
__snake_case : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
__snake_case : Dict = np.array([[0, 3], [3, 0], [2, 3]] )
__snake_case : Tuple = np.array([[2, 1], [6, 3]] )
with self.assertRaises(lowerCamelCase ):
schur_complement(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def __snake_case ( self : List[Any] ) -> None:
__snake_case : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
__snake_case : Tuple = np.array([[0, 3], [3, 0], [2, 3]] )
__snake_case : List[Any] = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(lowerCamelCase ):
schur_complement(lowerCamelCase , lowerCamelCase , lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 203
| 1
|
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
A : Optional[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
A : Any = 2_5_6_0_4_7
A : List[str] = 2_5_6_1_4_5
@require_sentencepiece
@require_tokenizers
class A (SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : List[str] = NllbTokenizer
__lowerCamelCase : int = NllbTokenizerFast
__lowerCamelCase : List[Any] = True
__lowerCamelCase : List[Any] = True
__lowerCamelCase : int = {}
def a_ ( self : Optional[int] ) -> int:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
A__ = NllbTokenizer(__lowerCAmelCase , keep_accents=__lowerCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def a_ ( self : Optional[int] ) -> Any:
"""simple docstring"""
A__ = NllbTokenizer(__lowerCAmelCase , keep_accents=__lowerCAmelCase )
A__ = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__lowerCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
A__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__lowerCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
A__ = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
self.assertListEqual(
__lowerCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
A__ = tokenizer.convert_ids_to_tokens(__lowerCAmelCase )
self.assertListEqual(
__lowerCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def a_ ( self : List[str] ) -> str:
"""simple docstring"""
A__ = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-nllb""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A__ = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
A__ = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
A__ = tempfile.mkdtemp()
A__ = tokenizer_r.save_pretrained(__lowerCAmelCase )
A__ = tokenizer_p.save_pretrained(__lowerCAmelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
A__ = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase )
# Checks everything loads correctly in the same way
A__ = tokenizer_r.from_pretrained(__lowerCAmelCase )
A__ = tokenizer_p.from_pretrained(__lowerCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase ) )
shutil.rmtree(__lowerCAmelCase )
# Save tokenizer rust, legacy_format=True
A__ = tempfile.mkdtemp()
A__ = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase )
A__ = tokenizer_p.save_pretrained(__lowerCAmelCase )
# Checks it save with the same files
self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase )
# Checks everything loads correctly in the same way
A__ = tokenizer_r.from_pretrained(__lowerCAmelCase )
A__ = tokenizer_p.from_pretrained(__lowerCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase ) )
shutil.rmtree(__lowerCAmelCase )
# Save tokenizer rust, legacy_format=False
A__ = tempfile.mkdtemp()
A__ = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase )
A__ = tokenizer_p.save_pretrained(__lowerCAmelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
A__ = tokenizer_r.from_pretrained(__lowerCAmelCase )
A__ = tokenizer_p.from_pretrained(__lowerCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase ) )
shutil.rmtree(__lowerCAmelCase )
@require_torch
def a_ ( self : Tuple ) -> Tuple:
"""simple docstring"""
if not self.test_seqaseq:
return
A__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Longer text that will definitely require truncation.
A__ = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"""
""" Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"""
""" will only worsen the violence and misery for millions of people.""",
]
A__ = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"""
""" Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"""
""" că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
try:
A__ = tokenizer.prepare_seqaseq_batch(
src_texts=__lowerCAmelCase , tgt_texts=__lowerCAmelCase , max_length=3 , max_target_length=10 , return_tensors="""pt""" , src_lang="""eng_Latn""" , tgt_lang="""ron_Latn""" , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
# max_target_length will default to max_length if not specified
A__ = tokenizer.prepare_seqaseq_batch(
__lowerCAmelCase , tgt_texts=__lowerCAmelCase , max_length=3 , return_tensors="""pt""" )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
A__ = tokenizer.prepare_seqaseq_batch(
src_texts=__lowerCAmelCase , max_length=3 , max_target_length=10 , return_tensors="""pt""" )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn("""decoder_input_ids""" , __lowerCAmelCase )
@unittest.skip("""Unfortunately way too slow to build a BPE with SentencePiece.""" )
def a_ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
pass
def a_ ( self : str ) -> int:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A__ = [AddedToken("""<special>""" , lstrip=__lowerCAmelCase )]
A__ = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , **__lowerCAmelCase )
A__ = tokenizer_r.encode("""Hey this is a <special> token""" )
A__ = tokenizer_r.encode("""<special>""" , add_special_tokens=__lowerCAmelCase )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
A__ = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , **__lowerCAmelCase , )
A__ = self.tokenizer_class.from_pretrained(
__lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , **__lowerCAmelCase )
A__ = tokenizer_p.encode("""Hey this is a <special> token""" )
A__ = tokenizer_cr.encode("""Hey this is a <special> token""" )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class A (unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = '''facebook/nllb-200-distilled-600M'''
__lowerCamelCase : Optional[Any] = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
__lowerCamelCase : List[str] = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
__lowerCamelCase : List[Any] = [
256047,
16297,
134408,
8165,
248066,
14734,
950,
1135,
105721,
3573,
83,
27352,
108,
49486,
2,
]
@classmethod
def a_ ( cls : Dict ) -> Union[str, Any]:
"""simple docstring"""
A__ = NllbTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""eng_Latn""" , tgt_lang="""ron_Latn""" )
A__ = 1
return cls
def a_ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Arab"""] , 25_60_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Latn"""] , 25_60_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""fra_Latn"""] , 25_60_57 )
def a_ ( self : Dict ) -> Tuple:
"""simple docstring"""
A__ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __lowerCAmelCase )
def a_ ( self : int ) -> Optional[Any]:
"""simple docstring"""
self.assertIn(__lowerCAmelCase , self.tokenizer.all_special_ids )
# fmt: off
A__ = [RO_CODE, 42_54, 9_80_68, 11_29_23, 3_90_72, 39_09, 7_13, 10_27_67, 26, 1_73_14, 3_56_42, 1_46_83, 3_31_18, 20_22, 6_69_87, 2, 25_60_47]
# fmt: on
A__ = self.tokenizer.decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase )
A__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertNotIn(self.tokenizer.eos_token , __lowerCAmelCase )
def a_ ( self : Any ) -> Tuple:
"""simple docstring"""
A__ = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , __lowerCAmelCase )
A__ = 10
A__ = self.tokenizer(__lowerCAmelCase , max_length=__lowerCAmelCase , truncation=__lowerCAmelCase ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , __lowerCAmelCase )
self.assertEqual(len(__lowerCAmelCase ) , __lowerCAmelCase )
def a_ ( self : str ) -> int:
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [25_62_03, 3] )
def a_ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
A__ = tempfile.mkdtemp()
A__ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__lowerCAmelCase )
A__ = NllbTokenizer.from_pretrained(__lowerCAmelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __lowerCAmelCase )
@require_torch
def a_ ( self : Tuple ) -> List[str]:
"""simple docstring"""
A__ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
A__ = shift_tokens_right(
batch["""labels"""] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id["""ron_Latn"""] )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual((2, 15) , batch.input_ids.shape )
self.assertEqual((2, 15) , batch.attention_mask.shape )
A__ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def a_ ( self : Tuple ) -> Tuple:
"""simple docstring"""
A__ = self.tokenizer(self.src_text , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=3 , return_tensors="""pt""" )
A__ = self.tokenizer(
text_target=self.tgt_text , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=10 , return_tensors="""pt""" )
A__ = targets["""input_ids"""]
A__ = shift_tokens_right(
__lowerCAmelCase , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def a_ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
A__ = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , {
# A, test, EOS, en_XX
"""input_ids""": [[25_60_47, 70, 73_56, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 25_60_57,
} , )
@require_torch
def a_ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
A__ = True
A__ = self.tokenizer(
"""UN Chief says there is no military solution in Syria""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" )
self.assertEqual(
inputs.input_ids , [1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2, 25_60_47] )
A__ = False
A__ = self.tokenizer(
"""UN Chief says there is no military solution in Syria""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" )
self.assertEqual(
inputs.input_ids , [25_60_47, 1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2] )
| 176
|
def __lowerCamelCase ( __a :str ) -> bool:
"""simple docstring"""
A__ = 0
for ch in input_str:
A__ = ord(__a )
A__ = pow(2 , __a )
# If we already turned on bit for current character's unicode
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 176
| 1
|
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
_lowercase = [
"kernels/rwkv/wkv_cuda.cu",
"kernels/rwkv/wkv_op.cpp",
"kernels/deformable_detr/ms_deform_attn.h",
"kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
"models/graphormer/algos_graphormer.pyx",
]
def lowerCAmelCase__ ( UpperCamelCase_ : Any )-> Any:
# Test all the extensions added in the setup
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
_lowercase = parser.parse_args()
if args.check_lib:
_lowercase = importlib.import_module("transformers")
_lowercase = Path(transformers_module.__file__).parent
else:
_lowercase = Path.cwd() / "build/lib/transformers"
if not test_custom_files_are_present(transformers_path):
raise ValueError("The built release does not contain the custom files. Fix this before going further!")
| 526
|
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_lowercase = 16
_lowercase = 32
def lowerCAmelCase__ ( UpperCamelCase_ : Accelerator , UpperCamelCase_ : int = 1_6 )-> List[str]:
A__ = AutoTokenizer.from_pretrained('''bert-base-cased''' )
A__ = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(UpperCamelCase_ : Any ):
# max_length=None => use the model max length (it's actually the default)
A__ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
A__ = datasets.map(
UpperCamelCase_ , batched=UpperCamelCase_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A__ = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(UpperCamelCase_ : str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
A__ = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
A__ = 1_6
elif accelerator.mixed_precision != "no":
A__ = 8
else:
A__ = None
return tokenizer.pad(
UpperCamelCase_ , padding='''longest''' , max_length=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_tensors='''pt''' , )
# Instantiate dataloaders.
A__ = DataLoader(
tokenized_datasets['''train'''] , shuffle=UpperCamelCase_ , collate_fn=UpperCamelCase_ , batch_size=UpperCamelCase_ )
A__ = DataLoader(
tokenized_datasets['''validation'''] , shuffle=UpperCamelCase_ , collate_fn=UpperCamelCase_ , batch_size=UpperCamelCase_ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowercase = mocked_dataloaders # noqa: F811
def lowerCAmelCase__ ( UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple )-> Union[str, Any]:
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , UpperCamelCase_ ) == "1":
A__ = 2
# Initialize accelerator
A__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A__ = config['''lr''']
A__ = int(config['''num_epochs'''] )
A__ = int(config['''seed'''] )
A__ = int(config['''batch_size'''] )
A__ = evaluate.load('''glue''' , '''mrpc''' )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=UpperCamelCase_ )
def inner_training_loop(UpperCamelCase_ : Optional[int] ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(UpperCamelCase_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A__ = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=UpperCamelCase_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A__ = model.to(accelerator.device )
# Instantiate optimizer
A__ = AdamW(params=model.parameters() , lr=UpperCamelCase_ )
A__ , A__ = get_dataloaders(UpperCamelCase_ , UpperCamelCase_ )
# Instantiate scheduler
A__ = get_linear_schedule_with_warmup(
optimizer=UpperCamelCase_ , num_warmup_steps=1_0_0 , num_training_steps=(len(UpperCamelCase_ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A__ , A__ , A__ , A__ , A__ = accelerator.prepare(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Now we train the model
for epoch in range(UpperCamelCase_ ):
model.train()
for step, batch in enumerate(UpperCamelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
A__ = model(**UpperCamelCase_ )
A__ = outputs.loss
accelerator.backward(UpperCamelCase_ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(UpperCamelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A__ = model(**UpperCamelCase_ )
A__ = outputs.logits.argmax(dim=-1 )
A__ , A__ = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=UpperCamelCase_ , references=UpperCamelCase_ , )
A__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:" , UpperCamelCase_ )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def lowerCAmelCase__ ( )-> Optional[Any]:
A__ = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=UpperCamelCase_ , default=UpperCamelCase_ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
A__ = parser.parse_args()
A__ = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 4_2, '''batch_size''': 1_6}
training_function(UpperCamelCase_ , UpperCamelCase_ )
if __name__ == "__main__":
main()
| 526
| 1
|
from __future__ import annotations
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
lowercase = str(__SCREAMING_SNAKE_CASE )
return n == n[::-1]
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE = 100_0000 ):
lowercase = 0
for i in range(1 , __SCREAMING_SNAKE_CASE ):
if is_palindrome(__SCREAMING_SNAKE_CASE ) and is_palindrome(bin(__SCREAMING_SNAKE_CASE ).split('b' )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 84
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
__magic_name__ = None
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
__magic_name__ = {
"""vocab_file""": {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_file""": {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/tokenizer.json""",
},
}
__magic_name__ = {
"""camembert-base""": 5_12,
}
__magic_name__ = """▁"""
class SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE ):
snake_case = VOCAB_FILES_NAMES
snake_case = PRETRAINED_VOCAB_FILES_MAP
snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case = ["input_ids", "attention_mask"]
snake_case = CamembertTokenizer
def __init__( self : str , SCREAMING_SNAKE_CASE_ : Optional[int]=None , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , SCREAMING_SNAKE_CASE_ : Any="<s>" , SCREAMING_SNAKE_CASE_ : Tuple="</s>" , SCREAMING_SNAKE_CASE_ : Tuple="</s>" , SCREAMING_SNAKE_CASE_ : str="<s>" , SCREAMING_SNAKE_CASE_ : Optional[int]="<unk>" , SCREAMING_SNAKE_CASE_ : str="<pad>" , SCREAMING_SNAKE_CASE_ : List[str]="<mask>" , SCREAMING_SNAKE_CASE_ : Tuple=["<s>NOTUSED", "</s>NOTUSED"] , **SCREAMING_SNAKE_CASE_ : Optional[Any] , ):
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase__ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token
super().__init__(
SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , additional_special_tokens=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
lowerCamelCase__ = vocab_file
lowerCamelCase__ = False if not self.vocab_file else True
def __UpperCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
lowerCamelCase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCamelCase__ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_ )
return (out_vocab_file,)
| 129
| 0
|
def _snake_case ( __snake_case , __snake_case ):
# Check if the input is valid
if not len(__snake_case ) == len(__snake_case ) == 3:
raise ValueError('''Please enter a valid equation.''' )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError('''Both a & b of two equations can\'t be zero.''' )
# Extract the coefficients
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = equationa
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = equationa
# Calculate the determinants of the matrices
_UpperCamelCase = aa * ba - aa * ba
_UpperCamelCase = ca * ba - ca * ba
_UpperCamelCase = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError('''Infinite solutions. (Consistent system)''' )
else:
raise ValueError('''No solution. (Inconsistent system)''' )
else:
if determinant_x == determinant_y == 0:
# Trivial solution (Inconsistent system)
return (0.0, 0.0)
else:
_UpperCamelCase = determinant_x / determinant
_UpperCamelCase = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
| 71
|
from __future__ import annotations
import math
class lowerCAmelCase_ :
def __init__( self : int , _A : int ):
_UpperCamelCase = size
# approximate the overall size of segment tree with given value
_UpperCamelCase = [0 for i in range(0 , 4 * size )]
# create array to store lazy update
_UpperCamelCase = [0 for i in range(0 , 4 * size )]
_UpperCamelCase = [0 for i in range(0 , 4 * size )] # flag for lazy update
def UpperCamelCase_ ( self : str , _A : int ):
return idx * 2
def UpperCamelCase_ ( self : Any , _A : int ):
return idx * 2 + 1
def UpperCamelCase_ ( self : Union[str, Any] , _A : int , _A : int , _A : int , _A : list[int] ):
if left_element == right_element:
_UpperCamelCase = a[left_element - 1]
else:
_UpperCamelCase = (left_element + right_element) // 2
self.build(self.left(_A ) , _A , _A , _A )
self.build(self.right(_A ) , mid + 1 , _A , _A )
_UpperCamelCase = max(
self.segment_tree[self.left(_A )] , self.segment_tree[self.right(_A )] )
def UpperCamelCase_ ( self : Tuple , _A : int , _A : int , _A : int , _A : int , _A : int , _A : int ):
if self.flag[idx] is True:
_UpperCamelCase = self.lazy[idx]
_UpperCamelCase = False
if left_element != right_element:
_UpperCamelCase = self.lazy[idx]
_UpperCamelCase = self.lazy[idx]
_UpperCamelCase = True
_UpperCamelCase = True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
_UpperCamelCase = val
if left_element != right_element:
_UpperCamelCase = val
_UpperCamelCase = val
_UpperCamelCase = True
_UpperCamelCase = True
return True
_UpperCamelCase = (left_element + right_element) // 2
self.update(self.left(_A ) , _A , _A , _A , _A , _A )
self.update(self.right(_A ) , mid + 1 , _A , _A , _A , _A )
_UpperCamelCase = max(
self.segment_tree[self.left(_A )] , self.segment_tree[self.right(_A )] )
return True
def UpperCamelCase_ ( self : Any , _A : int , _A : int , _A : int , _A : int , _A : int ):
if self.flag[idx] is True:
_UpperCamelCase = self.lazy[idx]
_UpperCamelCase = False
if left_element != right_element:
_UpperCamelCase = self.lazy[idx]
_UpperCamelCase = self.lazy[idx]
_UpperCamelCase = True
_UpperCamelCase = True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
_UpperCamelCase = (left_element + right_element) // 2
_UpperCamelCase = self.query(self.left(_A ) , _A , _A , _A , _A )
_UpperCamelCase = self.query(self.right(_A ) , mid + 1 , _A , _A , _A )
return max(_A , _A )
def __str__( self : Tuple ):
return str([self.query(1 , 1 , self.size , _A , _A ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
_lowerCAmelCase = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
_lowerCAmelCase = 15
_lowerCAmelCase = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
| 71
| 1
|
"""simple docstring"""
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
if index == number_of_items:
return 0
UpperCamelCase : Optional[int] = 0
UpperCamelCase : Any = 0
UpperCamelCase : int = knapsack(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , index + 1 )
if weights[index] <= max_weight:
UpperCamelCase : Union[str, Any] = values[index] + knapsack(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , max_weight - weights[index] , index + 1 )
return max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 102
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
a_ = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['''CLIPTokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['''CLIPFeatureExtractor''']
a_ = ['''CLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 339
| 0
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
a = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
@dataclass
class a_ :
UpperCAmelCase : Optional[str] = field(
default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} )
UpperCAmelCase : Optional[str] = field(
default=snake_case , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
UpperCAmelCase : Optional[str] = field(
default=snake_case , metadata={"""help""": """The column name of the images in the files."""} )
UpperCAmelCase : Optional[str] = field(default=snake_case , metadata={"""help""": """A folder containing the training data."""} )
UpperCAmelCase : Optional[str] = field(default=snake_case , metadata={"""help""": """A folder containing the validation data."""} )
UpperCAmelCase : Optional[float] = field(
default=0.1_5 , metadata={"""help""": """Percent to split off of train for validation."""} )
UpperCAmelCase : Optional[int] = field(
default=snake_case , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
UpperCAmelCase : Optional[int] = field(
default=snake_case , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def UpperCamelCase ( self : Optional[Any] ) -> Tuple:
snake_case: Union[str, Any] ={}
if self.train_dir is not None:
snake_case: List[str] =self.train_dir
if self.validation_dir is not None:
snake_case: int =self.validation_dir
snake_case: List[str] =data_files if data_files else None
@dataclass
class a_ :
UpperCAmelCase : str = field(
default=snake_case , metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
} , )
UpperCAmelCase : Optional[str] = field(
default=snake_case , metadata={"""help""": """Pretrained config name or path if not the same as model_name_or_path"""} )
UpperCAmelCase : Optional[str] = field(
default=snake_case , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
UpperCAmelCase : Optional[str] = field(
default=snake_case , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
UpperCAmelCase : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
UpperCAmelCase : str = field(default=snake_case , metadata={"""help""": """Name or path of preprocessor config."""} )
UpperCAmelCase : bool = field(
default=snake_case , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
UpperCAmelCase : float = field(
default=0.7_5 , metadata={"""help""": """The ratio of the number of masked tokens in the input sequence."""} )
UpperCAmelCase : bool = field(
default=snake_case , metadata={"""help""": """Whether or not to train with normalized pixel values as target."""} )
@dataclass
class a_ ( snake_case ):
UpperCAmelCase : float = field(
default=1e-3 , metadata={"""help""": """Base learning rate: absolute_lr = base_lr * total_batch_size / 256."""} )
def a_ ( __UpperCAmelCase ) -> List[Any]:
"""simple docstring"""
snake_case: List[Any] =torch.stack([example['pixel_values'] for example in examples] )
return {"pixel_values": pixel_values}
def a_ ( ) -> Union[str, Any]:
"""simple docstring"""
snake_case: Dict =HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
snake_case , snake_case , snake_case: Tuple =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
snake_case , snake_case , snake_case: Union[str, Any] =parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_mae' , __UpperCAmelCase , __UpperCAmelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
snake_case: List[str] =training_args.get_process_log_level()
logger.setLevel(__UpperCAmelCase )
transformers.utils.logging.set_verbosity(__UpperCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
snake_case: List[Any] =None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
snake_case: List[str] =get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
snake_case: Any =load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
snake_case: Any =None if 'validation' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , __UpperCAmelCase ) and data_args.train_val_split > 0.0:
snake_case: Dict =ds['train'].train_test_split(data_args.train_val_split )
snake_case: Optional[Any] =split['train']
snake_case: Optional[Any] =split['test']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case: List[str] ={
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
snake_case: Union[str, Any] =ViTMAEConfig.from_pretrained(model_args.config_name , **__UpperCAmelCase )
elif model_args.model_name_or_path:
snake_case: List[str] =ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **__UpperCAmelCase )
else:
snake_case: Optional[int] =ViTMAEConfig()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(f'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(f'''New config: {config}''' )
# adapt config
config.update(
{
'mask_ratio': model_args.mask_ratio,
'norm_pix_loss': model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
snake_case: Optional[int] =ViTImageProcessor.from_pretrained(model_args.image_processor_name , **__UpperCAmelCase )
elif model_args.model_name_or_path:
snake_case: Tuple =ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **__UpperCAmelCase )
else:
snake_case: Dict =ViTImageProcessor()
# create model
if model_args.model_name_or_path:
snake_case: Optional[int] =ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
snake_case: Union[str, Any] =ViTMAEForPreTraining(__UpperCAmelCase )
if training_args.do_train:
snake_case: Optional[Any] =ds['train'].column_names
else:
snake_case: Optional[Any] =ds['validation'].column_names
if data_args.image_column_name is not None:
snake_case: List[str] =data_args.image_column_name
elif "image" in column_names:
snake_case: Dict ='image'
elif "img" in column_names:
snake_case: Optional[int] ='img'
else:
snake_case: int =column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
snake_case: List[Any] =image_processor.size['shortest_edge']
else:
snake_case: Tuple =(image_processor.size['height'], image_processor.size['width'])
snake_case: Dict =Compose(
[
Lambda(lambda __UpperCAmelCase : img.convert('RGB' ) if img.mode != "RGB" else img ),
RandomResizedCrop(__UpperCAmelCase , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(__UpperCAmelCase ):
snake_case: Tuple =[transforms(__UpperCAmelCase ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
snake_case: Tuple =ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(__UpperCAmelCase )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
snake_case: List[Any] =(
ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(__UpperCAmelCase )
# Compute absolute learning rate
snake_case: Tuple =(
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
snake_case: Union[str, Any] =training_args.base_learning_rate * total_train_batch_size / 2_56
# Initialize our trainer
snake_case: Tuple =Trainer(
model=__UpperCAmelCase , args=__UpperCAmelCase , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=__UpperCAmelCase , data_collator=__UpperCAmelCase , )
# Training
if training_args.do_train:
snake_case: List[str] =None
if training_args.resume_from_checkpoint is not None:
snake_case: Any =training_args.resume_from_checkpoint
elif last_checkpoint is not None:
snake_case: List[Any] =last_checkpoint
snake_case: Dict =trainer.train(resume_from_checkpoint=__UpperCAmelCase )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
snake_case: Union[str, Any] =trainer.evaluate()
trainer.log_metrics('eval' , __UpperCAmelCase )
trainer.save_metrics('eval' , __UpperCAmelCase )
# Write model card and (optionally) push to hub
snake_case: Dict ={
'tasks': 'masked-auto-encoding',
'dataset': data_args.dataset_name,
'tags': ['masked-auto-encoding'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__UpperCAmelCase )
else:
trainer.create_model_card(**__UpperCAmelCase )
def a_ ( __UpperCAmelCase ) -> str:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 347
|
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
a = logging.getLogger(__name__)
class a_ ( snake_case ):
UpperCAmelCase : Any = """sequence-classification"""
def __init__( self : int , a_ : str ) -> str:
if type(a_ ) == dict:
snake_case: List[Any] =Namespace(**a_ )
snake_case: Tuple =glue_output_modes[hparams.task]
snake_case: Any =glue_tasks_num_labels[hparams.task]
super().__init__(a_ , a_ , self.mode )
def UpperCamelCase ( self : Tuple , **a_ : Tuple ) -> Union[str, Any]:
return self.model(**a_ )
def UpperCamelCase ( self : int , a_ : Union[str, Any] , a_ : Optional[int] ) -> Optional[int]:
snake_case: Any ={'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
snake_case: Optional[int] =batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
snake_case: Optional[int] =self(**a_ )
snake_case: Any =outputs[0]
snake_case: Union[str, Any] =self.trainer.lr_schedulers[0]['scheduler']
snake_case: str ={'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def UpperCamelCase ( self : str ) -> Tuple:
snake_case: int =self.hparams
snake_case: Union[str, Any] =processors[args.task]()
snake_case: Union[str, Any] =processor.get_labels()
for mode in ["train", "dev"]:
snake_case: Optional[Any] =self._feature_file(a_ )
if os.path.exists(a_ ) and not args.overwrite_cache:
logger.info('Loading features from cached file %s' , a_ )
else:
logger.info('Creating features from dataset file at %s' , args.data_dir )
snake_case: int =(
processor.get_dev_examples(args.data_dir )
if mode == 'dev'
else processor.get_train_examples(args.data_dir )
)
snake_case: Tuple =convert_examples_to_features(
a_ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('Saving features into cached file %s' , a_ )
torch.save(a_ , a_ )
def UpperCamelCase ( self : List[Any] , a_ : str , a_ : int , a_ : bool = False ) -> DataLoader:
snake_case: List[Any] ='dev' if mode == 'test' else mode
snake_case: Union[str, Any] =self._feature_file(a_ )
logger.info('Loading features from cached file %s' , a_ )
snake_case: Dict =torch.load(a_ )
snake_case: Union[str, Any] =torch.tensor([f.input_ids for f in features] , dtype=torch.long )
snake_case: List[Any] =torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
snake_case: str =torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
snake_case: Optional[Any] =torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
snake_case: Union[str, Any] =torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(a_ , a_ , a_ , a_ ) , batch_size=a_ , shuffle=a_ , )
def UpperCamelCase ( self : List[str] , a_ : Optional[int] , a_ : Any ) -> Dict:
snake_case: int ={'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
snake_case: Tuple =batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
snake_case: List[str] =self(**a_ )
snake_case , snake_case: str =outputs[:2]
snake_case: Any =logits.detach().cpu().numpy()
snake_case: Union[str, Any] =inputs['labels'].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def UpperCamelCase ( self : int , a_ : Union[str, Any] ) -> tuple:
snake_case: Optional[Any] =torch.stack([x['val_loss'] for x in outputs] ).mean().detach().cpu().item()
snake_case: str =np.concatenate([x['pred'] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
snake_case: Union[str, Any] =np.argmax(a_ , axis=1 )
elif self.hparams.glue_output_mode == "regression":
snake_case: Optional[Any] =np.squeeze(a_ )
snake_case: Tuple =np.concatenate([x['target'] for x in outputs] , axis=0 )
snake_case: Any =[[] for _ in range(out_label_ids.shape[0] )]
snake_case: str =[[] for _ in range(out_label_ids.shape[0] )]
snake_case: int ={**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task , a_ , a_ )}
snake_case: Union[str, Any] =dict(results.items() )
snake_case: Dict =results
return ret, preds_list, out_label_list
def UpperCamelCase ( self : str , a_ : list ) -> dict:
snake_case , snake_case , snake_case: Union[str, Any] =self._eval_end(a_ )
snake_case: Optional[Any] =ret['log']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def UpperCamelCase ( self : Tuple , a_ : Tuple ) -> dict:
snake_case , snake_case , snake_case: int =self._eval_end(a_ )
snake_case: List[Any] =ret['log']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def UpperCamelCase ( a_ : Optional[int] , a_ : Dict ) -> Tuple:
BaseTransformer.add_model_specific_args(a_ , a_ )
parser.add_argument(
'--max_seq_length' , default=1_2_8 , type=a_ , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--task' , default='' , type=a_ , required=a_ , help='The GLUE task to run' , )
parser.add_argument(
'--gpus' , default=0 , type=a_ , help='The number of GPUs allocated for this, it is by default 0 meaning none' , )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
return parser
def a_ ( ) -> Any:
"""simple docstring"""
snake_case: Tuple =argparse.ArgumentParser()
add_generic_args(__UpperCAmelCase , os.getcwd() )
snake_case: List[Any] =GLUETransformer.add_model_specific_args(__UpperCAmelCase , os.getcwd() )
snake_case: Optional[int] =parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
snake_case: Optional[int] =os.path.join(
'./results' , f'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' , )
os.makedirs(args.output_dir )
snake_case: str =GLUETransformer(__UpperCAmelCase )
snake_case: Tuple =generic_train(__UpperCAmelCase , __UpperCAmelCase )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
snake_case: str =sorted(glob.glob(os.path.join(args.output_dir , 'checkpoint-epoch=*.ckpt' ) , recursive=__UpperCAmelCase ) )
snake_case: int =model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(__UpperCAmelCase )
if __name__ == "__main__":
main()
| 347
| 1
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
snake_case = MODEL_FOR_CAUSAL_LM_MAPPING
snake_case = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def lowerCamelCase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
A_ = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="pt" )
# Using `do_sample=False` to force deterministic output
A_ = text_generator("This is a test" , do_sample=_snake_case )
self.assertEqual(
_snake_case , [
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
] , )
A_ = text_generator(["This is a test", "This is a second test"] )
self.assertEqual(
_snake_case , [
[
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
],
[
{
"generated_text": (
"This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
" oscope. oscope. FiliFili@@"
)
}
],
] , )
A_ = text_generator("This is a test" , do_sample=_snake_case , num_return_sequences=2 , return_tensors=_snake_case )
self.assertEqual(
_snake_case , [
{"generated_token_ids": ANY(_snake_case )},
{"generated_token_ids": ANY(_snake_case )},
] , )
A_ = text_generator.model.config.eos_token_id
A_ = "<pad>"
A_ = text_generator(
["This is a test", "This is a second test"] , do_sample=_snake_case , num_return_sequences=2 , batch_size=2 , return_tensors=_snake_case , )
self.assertEqual(
_snake_case , [
[
{"generated_token_ids": ANY(_snake_case )},
{"generated_token_ids": ANY(_snake_case )},
],
[
{"generated_token_ids": ANY(_snake_case )},
{"generated_token_ids": ANY(_snake_case )},
],
] , )
@require_tf
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
A_ = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="tf" )
# Using `do_sample=False` to force deterministic output
A_ = text_generator("This is a test" , do_sample=_snake_case )
self.assertEqual(
_snake_case , [
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
] , )
A_ = text_generator(["This is a test", "This is a second test"] , do_sample=_snake_case )
self.assertEqual(
_snake_case , [
[
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
],
[
{
"generated_text": (
"This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
" Cannes 閲閲Cannes Cannes Cannes 攵 please,"
)
}
],
] , )
def lowerCamelCase__ ( self : str , _snake_case : List[Any] , _snake_case : Any , _snake_case : Optional[Any] ) -> List[Any]:
"""simple docstring"""
A_ = TextGenerationPipeline(model=_snake_case , tokenizer=_snake_case )
return text_generator, ["This is a test", "Another test"]
def lowerCamelCase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
A_ = "Hello I believe in"
A_ = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" )
A_ = text_generator(_snake_case )
self.assertEqual(
_snake_case , [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}] , )
A_ = text_generator(_snake_case , stop_sequence=" fe" )
self.assertEqual(_snake_case , [{"generated_text": "Hello I believe in fe"}] )
def lowerCamelCase__ ( self : str , _snake_case : str , _snake_case : str ) -> Optional[Any]:
"""simple docstring"""
A_ = text_generator.model
A_ = text_generator.tokenizer
A_ = text_generator("This is a test" )
self.assertEqual(_snake_case , [{"generated_text": ANY(_snake_case )}] )
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) )
A_ = text_generator("This is a test" , return_full_text=_snake_case )
self.assertEqual(_snake_case , [{"generated_text": ANY(_snake_case )}] )
self.assertNotIn("This is a test" , outputs[0]["generated_text"] )
A_ = pipeline(task="text-generation" , model=_snake_case , tokenizer=_snake_case , return_full_text=_snake_case )
A_ = text_generator("This is a test" )
self.assertEqual(_snake_case , [{"generated_text": ANY(_snake_case )}] )
self.assertNotIn("This is a test" , outputs[0]["generated_text"] )
A_ = text_generator("This is a test" , return_full_text=_snake_case )
self.assertEqual(_snake_case , [{"generated_text": ANY(_snake_case )}] )
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) )
A_ = text_generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=_snake_case )
self.assertEqual(
_snake_case , [
[{"generated_text": ANY(_snake_case )}, {"generated_text": ANY(_snake_case )}],
[{"generated_text": ANY(_snake_case )}, {"generated_text": ANY(_snake_case )}],
] , )
if text_generator.tokenizer.pad_token is not None:
A_ = text_generator(
["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=_snake_case )
self.assertEqual(
_snake_case , [
[{"generated_text": ANY(_snake_case )}, {"generated_text": ANY(_snake_case )}],
[{"generated_text": ANY(_snake_case )}, {"generated_text": ANY(_snake_case )}],
] , )
with self.assertRaises(_snake_case ):
A_ = text_generator("test" , return_full_text=_snake_case , return_text=_snake_case )
with self.assertRaises(_snake_case ):
A_ = text_generator("test" , return_full_text=_snake_case , return_tensors=_snake_case )
with self.assertRaises(_snake_case ):
A_ = text_generator("test" , return_text=_snake_case , return_tensors=_snake_case )
# Empty prompt is slighly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
A_ = text_generator("" )
self.assertEqual(_snake_case , [{"generated_text": ANY(_snake_case )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
A_ = text_generator("" )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
A_ = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
if (
tokenizer.model_max_length < 10_000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator("This is a test" * 500 , max_new_tokens=20 )
A_ = text_generator("This is a test" * 500 , handle_long_generation="hole" , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(_snake_case ):
text_generator(
"This is a test" * 500 , handle_long_generation="hole" , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def lowerCamelCase__ ( self : int ) -> Optional[int]:
"""simple docstring"""
import torch
# Classic `model_kwargs`
A_ = pipeline(
model="hf-internal-testing/tiny-random-bloom" , model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
A_ = pipe("This is a test" )
self.assertEqual(
_snake_case , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
A_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
A_ = pipe("This is a test" )
self.assertEqual(
_snake_case , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
A_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
A_ = pipe("This is a test" )
self.assertEqual(
_snake_case , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
@require_torch
@require_torch_gpu
def lowerCamelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
import torch
A_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device=0 , torch_dtype=torch.floataa )
pipe("This is a test" )
@require_torch
@require_accelerate
@require_torch_gpu
def lowerCamelCase__ ( self : List[Any] ) -> Any:
"""simple docstring"""
import torch
A_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.floataa )
pipe("This is a test" , do_sample=_snake_case , top_p=0.5 )
def lowerCamelCase__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
A_ = "Hello world"
A_ = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" )
if text_generator.model.framework == "tf":
A_ = logging.get_logger("transformers.generation.tf_utils" )
else:
A_ = logging.get_logger("transformers.generation.utils" )
A_ = "Both `max_new_tokens`" # The beggining of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(_snake_case ) as cl:
A_ = text_generator(_snake_case , max_length=10 , max_new_tokens=1 )
self.assertIn(_snake_case , cl.out )
# The user only sets one -> no warning
with CaptureLogger(_snake_case ) as cl:
A_ = text_generator(_snake_case , max_new_tokens=1 )
self.assertNotIn(_snake_case , cl.out )
with CaptureLogger(_snake_case ) as cl:
A_ = text_generator(_snake_case , max_length=10 )
self.assertNotIn(_snake_case , cl.out )
| 115
|
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
def a_ ( UpperCamelCase_ ):
A_ = MobileNetVaConfig(layer_norm_eps=0.001 )
if "_quant" in model_name:
raise ValueError("Quantized models are not supported." )
A_ = re.match(R"^mobilenet_v1_([^_]*)_([^_]*)$" , UpperCamelCase_ )
if matches:
A_ = float(matches[1] )
A_ = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
A_ = 1_0_0_1
A_ = "imagenet-1k-id2label.json"
A_ = "huggingface/label-files"
A_ = json.load(open(hf_hub_download(UpperCamelCase_ , UpperCamelCase_ , repo_type="dataset" ) , "r" ) )
A_ = {int(UpperCamelCase_ ) + 1: v for k, v in idalabel.items()}
A_ = "background"
A_ = idalabel
A_ = {v: k for k, v in idalabel.items()}
return config
def a_ ( ):
A_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
A_ = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw )
return im
@torch.no_grad()
def a_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=False ):
A_ = get_mobilenet_va_config(UpperCamelCase_ )
# Load 🤗 model
A_ = MobileNetVaForImageClassification(UpperCamelCase_ ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
A_ = MobileNetVaImageProcessor(
crop_size={"width": config.image_size, "height": config.image_size} , size={"shortest_edge": config.image_size + 3_2} , )
A_ = image_processor(images=prepare_img() , return_tensors="pt" )
A_ = model(**UpperCamelCase_ )
A_ = outputs.logits
assert logits.shape == (1, 1_0_0_1)
if model_name == "mobilenet_v1_1.0_224":
A_ = torch.tensor([-4.1739, -1.1233, 3.1205] )
elif model_name == "mobilenet_v1_0.75_192":
A_ = torch.tensor([-3.9440, -2.3141, -0.3333] )
else:
A_ = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , UpperCamelCase_ , atol=1e-4 )
Path(UpperCamelCase_ ).mkdir(exist_ok=UpperCamelCase_ )
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(UpperCamelCase_ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(UpperCamelCase_ )
if push_to_hub:
print("Pushing to the hub..." )
A_ = "google/" + model_name
image_processor.push_to_hub(UpperCamelCase_ )
model.push_to_hub(UpperCamelCase_ )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''mobilenet_v1_1.0_224''',
type=str,
help='''Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.''',
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__SCREAMING_SNAKE_CASE : Dict = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 452
| 0
|
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class lowerCamelCase_ :
'''simple docstring'''
a__ : int
a__ : TreeNode | None = None
a__ : TreeNode | None = None
__lowercase = namedtuple('''CoinsDistribResult''', '''moves excess''')
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if root is None:
return 0
# Validation
def count_nodes(SCREAMING_SNAKE_CASE ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(SCREAMING_SNAKE_CASE ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(SCREAMING_SNAKE_CASE ) != count_coins(SCREAMING_SNAKE_CASE ):
raise ValueError('''The nodes number should be same as the number of coins''' )
# Main calculation
def get_distrib(SCREAMING_SNAKE_CASE ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
__UpperCamelCase :List[str] = get_distrib(node.left )
__UpperCamelCase :Tuple = get_distrib(node.right )
__UpperCamelCase :List[str] = 1 - left_distrib_excess
__UpperCamelCase :int = 1 - right_distrib_excess
__UpperCamelCase :Optional[int] = (
left_distrib_moves
+ right_distrib_moves
+ abs(SCREAMING_SNAKE_CASE )
+ abs(SCREAMING_SNAKE_CASE )
)
__UpperCamelCase :Dict = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return get_distrib(SCREAMING_SNAKE_CASE )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 710
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
__lowercase = {
'''configuration_audio_spectrogram_transformer''': [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ASTConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ASTForAudioClassification''',
'''ASTModel''',
'''ASTPreTrainedModel''',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ['''ASTFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 452
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
__A = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
__A = TaTokenizerFast
__A = {'''configuration_mt5''': ['''MT5Config''', '''MT5OnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'''MT5EncoderModel''',
'''MT5ForConditionalGeneration''',
'''MT5ForQuestionAnswering''',
'''MT5Model''',
'''MT5PreTrainedModel''',
'''MT5Stack''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['''TFMT5EncoderModel''', '''TFMT5ForConditionalGeneration''', '''TFMT5Model''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['''FlaxMT5EncoderModel''', '''FlaxMT5ForConditionalGeneration''', '''FlaxMT5Model''']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
__A = _LazyModule(
__name__,
globals()['''__file__'''],
_import_structure,
extra_objects={'''MT5Tokenizer''': MTaTokenizer, '''MT5TokenizerFast''': MTaTokenizerFast},
module_spec=__spec__,
)
| 593
|
'''simple docstring'''
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
lowercase__ =logging.get_logger(__name__)
# General docstring
lowercase__ ='PoolFormerConfig'
# Base docstring
lowercase__ ='sail/poolformer_s12'
lowercase__ =[1, 5_12, 7, 7]
# Image classification docstring
lowercase__ ='sail/poolformer_s12'
lowercase__ ='tabby, tabby cat'
lowercase__ =[
'sail/poolformer_s12',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def UpperCamelCase_ ( A__ , A__ = 0.0 , A__ = False ):
if drop_prob == 0.0 or not training:
return input
a_ = 1 - drop_prob
a_ = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
a_ = keep_prob + torch.rand(A__ , dtype=input.dtype , device=input.device )
random_tensor.floor_() # binarize
a_ = input.div(A__ ) * random_tensor
return output
class a_ ( nn.Module ):
def __init__( self , UpperCAmelCase = None ):
super().__init__()
a_ = drop_prob
def lowerCAmelCase__ ( self , UpperCAmelCase ):
return drop_path(UpperCAmelCase , self.drop_prob , self.training )
def lowerCAmelCase__ ( self ):
return "p={}".format(self.drop_prob )
class a_ ( nn.Module ):
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None ):
super().__init__()
a_ = patch_size if isinstance(UpperCAmelCase , collections.abc.Iterable ) else (patch_size, patch_size)
a_ = stride if isinstance(UpperCAmelCase , collections.abc.Iterable ) else (stride, stride)
a_ = padding if isinstance(UpperCAmelCase , collections.abc.Iterable ) else (padding, padding)
a_ = nn.Convad(UpperCAmelCase , UpperCAmelCase , kernel_size=UpperCAmelCase , stride=UpperCAmelCase , padding=UpperCAmelCase )
a_ = norm_layer(UpperCAmelCase ) if norm_layer else nn.Identity()
def lowerCAmelCase__ ( self , UpperCAmelCase ):
a_ = self.projection(UpperCAmelCase )
a_ = self.norm(UpperCAmelCase )
return embeddings
class a_ ( nn.GroupNorm ):
def __init__( self , UpperCAmelCase , **UpperCAmelCase ):
super().__init__(1 , UpperCAmelCase , **UpperCAmelCase )
class a_ ( nn.Module ):
def __init__( self , UpperCAmelCase ):
super().__init__()
a_ = nn.AvgPoolad(UpperCAmelCase , stride=1 , padding=pool_size // 2 , count_include_pad=UpperCAmelCase )
def lowerCAmelCase__ ( self , UpperCAmelCase ):
return self.pool(UpperCAmelCase ) - hidden_states
class a_ ( nn.Module ):
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
super().__init__()
a_ = nn.Convad(UpperCAmelCase , UpperCAmelCase , 1 )
a_ = nn.Convad(UpperCAmelCase , UpperCAmelCase , 1 )
a_ = PoolFormerDropPath(UpperCAmelCase )
if isinstance(config.hidden_act , UpperCAmelCase ):
a_ = ACTaFN[config.hidden_act]
else:
a_ = config.hidden_act
def lowerCAmelCase__ ( self , UpperCAmelCase ):
a_ = self.conva(UpperCAmelCase )
a_ = self.act_fn(UpperCAmelCase )
a_ = self.drop(UpperCAmelCase )
a_ = self.conva(UpperCAmelCase )
a_ = self.drop(UpperCAmelCase )
return hidden_states
class a_ ( nn.Module ):
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
super().__init__()
a_ = PoolFormerPooling(UpperCAmelCase )
a_ = PoolFormerOutput(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
a_ = PoolFormerGroupNorm(UpperCAmelCase )
a_ = PoolFormerGroupNorm(UpperCAmelCase )
# Useful for training neural nets
a_ = PoolFormerDropPath(UpperCAmelCase ) if drop_path > 0.0 else nn.Identity()
a_ = config.use_layer_scale
if config.use_layer_scale:
a_ = nn.Parameter(
config.layer_scale_init_value * torch.ones((UpperCAmelCase) ) , requires_grad=UpperCAmelCase )
a_ = nn.Parameter(
config.layer_scale_init_value * torch.ones((UpperCAmelCase) ) , requires_grad=UpperCAmelCase )
def lowerCAmelCase__ ( self , UpperCAmelCase ):
if self.use_layer_scale:
a_ = self.pooling(self.before_norm(UpperCAmelCase ) )
a_ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
a_ = hidden_states + self.drop_path(UpperCAmelCase )
a_ = ()
a_ = self.output(self.after_norm(UpperCAmelCase ) )
a_ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
a_ = hidden_states + self.drop_path(UpperCAmelCase )
a_ = (output,) + outputs
return outputs
else:
a_ = self.drop_path(self.pooling(self.before_norm(UpperCAmelCase ) ) )
# First residual connection
a_ = pooling_output + hidden_states
a_ = ()
# Second residual connection inside the PoolFormerOutput block
a_ = self.drop_path(self.output(self.after_norm(UpperCAmelCase ) ) )
a_ = hidden_states + layer_output
a_ = (output,) + outputs
return outputs
class a_ ( nn.Module ):
def __init__( self , UpperCAmelCase ):
super().__init__()
a_ = config
# stochastic depth decay rule
a_ = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
a_ = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
a_ = nn.ModuleList(UpperCAmelCase )
# Transformer blocks
a_ = []
a_ = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
a_ = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
UpperCAmelCase , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(UpperCAmelCase ) )
a_ = nn.ModuleList(UpperCAmelCase )
def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase=False , UpperCAmelCase=True ):
a_ = () if output_hidden_states else None
a_ = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
a_ , a_ = layers
# Get patch embeddings from hidden_states
a_ = embedding_layer(UpperCAmelCase )
# Send the embeddings through the blocks
for _, blk in enumerate(UpperCAmelCase ):
a_ = blk(UpperCAmelCase )
a_ = layer_outputs[0]
if output_hidden_states:
a_ = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=UpperCAmelCase , hidden_states=UpperCAmelCase )
class a_ ( UpperCamelCase__ ):
lowerCamelCase__ : Union[str, Any] = PoolFormerConfig
lowerCamelCase__ : Optional[Any] = 'poolformer'
lowerCamelCase__ : List[Any] = 'pixel_values'
lowerCamelCase__ : int = True
def lowerCAmelCase__ ( self , UpperCAmelCase ):
if isinstance(UpperCAmelCase , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(UpperCAmelCase , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase=False ):
if isinstance(UpperCAmelCase , UpperCAmelCase ):
a_ = value
lowercase__ =r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
lowercase__ =r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n'
@add_start_docstrings(
'The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.' , UpperCamelCase__ , )
class a_ ( UpperCamelCase__ ):
def __init__( self , UpperCAmelCase ):
super().__init__(UpperCAmelCase )
a_ = config
a_ = PoolFormerEncoder(UpperCAmelCase )
# Initialize weights and apply final processing
self.post_init()
def lowerCAmelCase__ ( self ):
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase__ ( self , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , ):
a_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a_ = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
a_ = self.encoder(
UpperCAmelCase , output_hidden_states=UpperCAmelCase , return_dict=UpperCAmelCase , )
a_ = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=UpperCAmelCase , hidden_states=encoder_outputs.hidden_states , )
class a_ ( nn.Module ):
def __init__( self , UpperCAmelCase ):
super().__init__()
a_ = nn.Linear(config.hidden_size , config.hidden_size )
def lowerCAmelCase__ ( self , UpperCAmelCase ):
a_ = self.dense(UpperCAmelCase )
return output
@add_start_docstrings(
'\n PoolFormer Model transformer with an image classification head on top\n ' , UpperCamelCase__ , )
class a_ ( UpperCamelCase__ ):
def __init__( self , UpperCAmelCase ):
super().__init__(UpperCAmelCase )
a_ = config.num_labels
a_ = PoolFormerModel(UpperCAmelCase )
# Final norm
a_ = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
a_ = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase__ ( self , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , ):
a_ = return_dict if return_dict is not None else self.config.use_return_dict
a_ = self.poolformer(
UpperCAmelCase , output_hidden_states=UpperCAmelCase , return_dict=UpperCAmelCase , )
a_ = outputs[0]
a_ = self.classifier(self.norm(UpperCAmelCase ).mean([-2, -1] ) )
a_ = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
a_ = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
a_ = """single_label_classification"""
else:
a_ = """multi_label_classification"""
if self.config.problem_type == "regression":
a_ = MSELoss()
if self.num_labels == 1:
a_ = loss_fct(logits.squeeze() , labels.squeeze() )
else:
a_ = loss_fct(UpperCAmelCase , UpperCAmelCase )
elif self.config.problem_type == "single_label_classification":
a_ = CrossEntropyLoss()
a_ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
a_ = BCEWithLogitsLoss()
a_ = loss_fct(UpperCAmelCase , UpperCAmelCase )
if not return_dict:
a_ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=UpperCAmelCase , logits=UpperCAmelCase , hidden_states=outputs.hidden_states )
| 263
| 0
|
'''simple docstring'''
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class lowerCamelCase_ ( lowercase_ ):
def __init__( self : int , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[str] ):
"""simple docstring"""
super().__init__()
self.register_modules(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ )
@torch.no_grad()
def __call__( self : List[Any] , lowerCAmelCase__ : Union[str, Any] = 1 , lowerCAmelCase__ : str = None , lowerCAmelCase__ : str = 50 , lowerCAmelCase__ : Optional[int] = "pil" , lowerCAmelCase__ : List[str] = True , **lowerCAmelCase__ : List[Any] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE : Optional[Any] = image.to(self.device )
# set step values
self.scheduler.set_timesteps(lowerCAmelCase__ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
SCREAMING_SNAKE_CASE : Union[str, Any] = self.unet(lowerCAmelCase__ , lowerCAmelCase__ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
SCREAMING_SNAKE_CASE : str = self.scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).prev_sample
SCREAMING_SNAKE_CASE : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE : Dict = self.numpy_to_pil(lowerCAmelCase__ )
if not return_dict:
return (image,), "This is a local test"
return ImagePipelineOutput(images=lowerCAmelCase__ ), "This is a local test"
| 720
|
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
lowerCAmelCase_ : List[str] = logging.get_logger(__name__)
def UpperCAmelCase ( A : Optional[Any] , A : List[Any] , A : Any , A : Dict=False ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
if not is_sharded:
SCREAMING_SNAKE_CASE : List[str] = os.path.abspath(A )
logger.info(F"""Loading PyTorch weights from {pt_path}""" )
SCREAMING_SNAKE_CASE : List[str] = torch.load(A , map_location='''cpu''' )
logger.info(F"""PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.""" )
SCREAMING_SNAKE_CASE : Any = convert_pytorch_state_dict_to_flax(A , A )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
SCREAMING_SNAKE_CASE : List[str] = convert_pytorch_sharded_state_dict_to_flax(A , A )
return flax_state_dict
def UpperCAmelCase ( A : Tuple[str] , A : np.ndarray , A : Dict[str, jnp.ndarray] , A : str , ):
def is_key_or_prefix_key_in_dict(A : Tuple[str] ) -> bool:
return len(set(A ) & {key, (model_prefix,) + key} ) > 0
# layer norm
SCREAMING_SNAKE_CASE : List[str] = pt_tuple_key[:-1] + ('''scale''',)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(A ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
SCREAMING_SNAKE_CASE : List[Any] = pt_tuple_key[:-1] + ('''mean''',)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(A ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
SCREAMING_SNAKE_CASE : Dict = pt_tuple_key[:-1] + ('''var''',)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(A ):
return renamed_pt_tuple_key, pt_tensor
# embedding
SCREAMING_SNAKE_CASE : Any = pt_tuple_key[:-1] + ('''embedding''',)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(A ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
SCREAMING_SNAKE_CASE : Any = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(A ):
SCREAMING_SNAKE_CASE : Union[str, Any] = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
SCREAMING_SNAKE_CASE : Any = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(A ):
SCREAMING_SNAKE_CASE : Any = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
SCREAMING_SNAKE_CASE : Optional[Any] = pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
SCREAMING_SNAKE_CASE : Optional[Any] = pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
SCREAMING_SNAKE_CASE : Optional[Any] = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
SCREAMING_SNAKE_CASE : Optional[int] = pt_tuple_key[-2] + '''_g'''
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
SCREAMING_SNAKE_CASE : Dict = pt_tuple_key[-2] + '''_v'''
if name is not None:
SCREAMING_SNAKE_CASE : str = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def UpperCAmelCase ( A : Union[str, Any] , A : List[str] ):
# convert pytorch tensor to numpy
SCREAMING_SNAKE_CASE : str = {k: v.numpy() for k, v in pt_state_dict.items()}
SCREAMING_SNAKE_CASE : str = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
SCREAMING_SNAKE_CASE : Tuple = flax_model.params['''params''']
else:
SCREAMING_SNAKE_CASE : Optional[int] = flax_model.params
SCREAMING_SNAKE_CASE : int = flatten_dict(A )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
SCREAMING_SNAKE_CASE : int = flatten_dict(flax_model.params['''batch_stats'''] )
random_flax_state_dict.update(A )
SCREAMING_SNAKE_CASE : Any = {}
SCREAMING_SNAKE_CASE : int = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
SCREAMING_SNAKE_CASE : Optional[Any] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
SCREAMING_SNAKE_CASE : Union[str, Any] = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
SCREAMING_SNAKE_CASE : str = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
SCREAMING_SNAKE_CASE : List[str] = pt_tuple_key[1:]
# Correctly rename weight parameters
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Dict = rename_key_and_reshape_tensor(
A , A , A , A )
# add model prefix if necessary
SCREAMING_SNAKE_CASE : str = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
SCREAMING_SNAKE_CASE : List[str] = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
SCREAMING_SNAKE_CASE : str = jnp.asarray(A )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(A , A )
continue
# also add unexpected weight so that warning is thrown
SCREAMING_SNAKE_CASE : str = jnp.asarray(A )
else:
# also add unexpected weight so that warning is thrown
SCREAMING_SNAKE_CASE : Tuple = jnp.asarray(A )
return unflatten_dict(A )
def UpperCAmelCase ( A : Any , A : Union[str, Any] ):
import torch
# Load the index
SCREAMING_SNAKE_CASE : str = {}
for shard_file in shard_filenames:
# load using msgpack utils
SCREAMING_SNAKE_CASE : Optional[int] = torch.load(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = {k: v.numpy() for k, v in pt_state_dict.items()}
SCREAMING_SNAKE_CASE : List[str] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
SCREAMING_SNAKE_CASE : Tuple = flax_model.params['''params''']
SCREAMING_SNAKE_CASE : Optional[int] = flatten_dict(A )
random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) )
else:
SCREAMING_SNAKE_CASE : Optional[int] = flax_model.params
SCREAMING_SNAKE_CASE : Union[str, Any] = flatten_dict(A )
SCREAMING_SNAKE_CASE : List[str] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
SCREAMING_SNAKE_CASE : List[str] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
SCREAMING_SNAKE_CASE : Tuple = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
SCREAMING_SNAKE_CASE : str = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
SCREAMING_SNAKE_CASE : Union[str, Any] = pt_tuple_key[1:]
# Correctly rename weight parameters
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[Any] = rename_key_and_reshape_tensor(
A , A , A , A )
# add model prefix if necessary
SCREAMING_SNAKE_CASE : Dict = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
SCREAMING_SNAKE_CASE : Tuple = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
SCREAMING_SNAKE_CASE : List[str] = jnp.asarray(A )
continue
if "var" in flax_key[-1]:
SCREAMING_SNAKE_CASE : Any = jnp.asarray(A )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(A , A )
continue
# also add unexpected weight so that warning is thrown
SCREAMING_SNAKE_CASE : Any = jnp.asarray(A )
else:
# also add unexpected weight so that warning is thrown
SCREAMING_SNAKE_CASE : Optional[int] = jnp.asarray(A )
return unflatten_dict(A )
def UpperCAmelCase ( A : List[str] , A : str ):
SCREAMING_SNAKE_CASE : str = os.path.abspath(A )
logger.info(F"""Loading Flax weights from {flax_checkpoint_path}""" )
# import correct flax class
SCREAMING_SNAKE_CASE : Optional[Any] = getattr(A , '''Flax''' + model.__class__.__name__ )
# load flax weight dict
with open(A , '''rb''' ) as state_f:
try:
SCREAMING_SNAKE_CASE : Optional[int] = from_bytes(A , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F"""Unable to convert {flax_checkpoint_path} to Flax deserializable object. """ )
return load_flax_weights_in_pytorch_model(A , A )
def UpperCAmelCase ( A : int , A : int ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
# check if we have bf16 weights
SCREAMING_SNAKE_CASE : Any = flatten_dict(jax.tree_util.tree_map(lambda A : x.dtype == jnp.bfloataa , A ) ).values()
if any(A ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
'''before loading those in PyTorch model.''' )
SCREAMING_SNAKE_CASE : int = jax.tree_util.tree_map(
lambda A : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , A )
SCREAMING_SNAKE_CASE : Union[str, Any] = flatten_dict(A )
SCREAMING_SNAKE_CASE : str = pt_model.state_dict()
SCREAMING_SNAKE_CASE : Optional[Any] = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
SCREAMING_SNAKE_CASE : Optional[Any] = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
SCREAMING_SNAKE_CASE : int = []
SCREAMING_SNAKE_CASE : Any = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
SCREAMING_SNAKE_CASE : List[str] = flax_key_tuple[0] == pt_model.base_model_prefix
SCREAMING_SNAKE_CASE : Optional[Any] = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
SCREAMING_SNAKE_CASE : Union[str, Any] = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
SCREAMING_SNAKE_CASE : Optional[Any] = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(A ) not in pt_model_dict:
# conv layer
SCREAMING_SNAKE_CASE : Tuple = flax_key_tuple[:-1] + ('''weight''',)
SCREAMING_SNAKE_CASE : List[Any] = jnp.transpose(A , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(A ) not in pt_model_dict:
# linear layer
SCREAMING_SNAKE_CASE : List[str] = flax_key_tuple[:-1] + ('''weight''',)
SCREAMING_SNAKE_CASE : List[Any] = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
SCREAMING_SNAKE_CASE : List[Any] = flax_key_tuple[:-1] + ('''weight''',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
SCREAMING_SNAKE_CASE : Dict = flax_key_tuple[:-1] + ('''running_mean''',)
elif "var" in flax_key_tuple[-1]:
SCREAMING_SNAKE_CASE : Optional[int] = flax_key_tuple[:-1] + ('''running_var''',)
if "batch_stats" in flax_state:
SCREAMING_SNAKE_CASE : Any = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
SCREAMING_SNAKE_CASE : List[str] = '''.'''.join(A )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
SCREAMING_SNAKE_CASE : Any = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
SCREAMING_SNAKE_CASE : Tuple = key.split('''.''' )
SCREAMING_SNAKE_CASE : str = None
if key_components[-3::2] == ["parametrizations", "original0"]:
SCREAMING_SNAKE_CASE : List[Any] = key_components[-2] + '''_g'''
elif key_components[-3::2] == ["parametrizations", "original1"]:
SCREAMING_SNAKE_CASE : Any = key_components[-2] + '''_v'''
if name is not None:
SCREAMING_SNAKE_CASE : List[Any] = key_components[:-3] + [name]
SCREAMING_SNAKE_CASE : Union[str, Any] = '''.'''.join(A )
SCREAMING_SNAKE_CASE : Optional[int] = key
if flax_key in special_pt_names:
SCREAMING_SNAKE_CASE : Dict = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
F"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
else:
# add weight to pytorch dict
SCREAMING_SNAKE_CASE : Optional[int] = np.asarray(A ) if not isinstance(A , np.ndarray ) else flax_tensor
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.from_numpy(A )
# remove from missing keys
missing_keys.remove(A )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(A )
pt_model.load_state_dict(A )
# re-transform missing_keys to list
SCREAMING_SNAKE_CASE : Tuple = list(A )
if len(A ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
F""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
F""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
F""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
else:
logger.warning(F"""All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n""" )
if len(A ) > 0:
logger.warning(
F"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
F""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
''' use it for predictions and inference.''' )
else:
logger.warning(
F"""All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"""
'''If your task is similar to the task the model of the checkpoint was trained on, '''
F"""you can already use {pt_model.__class__.__name__} for predictions without further training.""" )
return pt_model
| 464
| 0
|
'''simple docstring'''
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Tuple = TaConfig.from_json_file(lowerCAmelCase__ )
print(F'Building PyTorch model from configuration: {config}' )
_lowerCamelCase : int = TaForConditionalGeneration(lowerCAmelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_ta(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
UpperCAmelCase_ : Dict = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 44
|
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
__UpperCAmelCase = logging.get_logger(__name__)
class __UpperCAmelCase ( _UpperCamelCase ):
def UpperCAmelCase ( self : int , a_ : List[Any] ) -> Optional[Any]:
'''simple docstring'''
if isinstance(a_ , a_ ):
a__ : Any = [label.strip() for label in labels.split("," ) if label.strip()]
return labels
def __call__( self : Union[str, Any] , a_ : Tuple , a_ : Optional[Any] , a_ : List[str] ) -> Optional[Any]:
'''simple docstring'''
if len(a_ ) == 0 or len(a_ ) == 0:
raise ValueError("You must include at least one label and at least one sequence." )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. "
"Make sure the passed template includes formatting syntax such as {{}} where the label should go."
).format(a_ ) )
if isinstance(a_ , a_ ):
a__ : str = [sequences]
a__ : Optional[int] = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(a_ )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(_UpperCamelCase )
class __UpperCAmelCase ( _UpperCamelCase ):
def __init__( self : str , a_ : Optional[Any]=ZeroShotClassificationArgumentHandler() , *a_ : Tuple , **a_ : str ) -> Optional[int]:
'''simple docstring'''
a__ : List[Any] = args_parser
super().__init__(*a_ , **a_ )
if self.entailment_id == -1:
logger.warning(
"Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
"-1. Define a descriptive label2id mapping in the model config to ensure correct outputs." )
@property
def UpperCAmelCase ( self : Tuple ) -> str:
'''simple docstring'''
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith("entail" ):
return ind
return -1
def UpperCAmelCase ( self : Optional[int] , a_ : List[Any] , a_ : int=True , a_ : Tuple=True , a_ : Tuple=TruncationStrategy.ONLY_FIRST , **a_ : Dict ) -> List[Any]:
'''simple docstring'''
a__ : str = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
" `pad_token=eos_token`" )
a__ : List[str] = self.tokenizer.eos_token
try:
a__ : List[str] = self.tokenizer(
a_ , add_special_tokens=a_ , return_tensors=a_ , padding=a_ , truncation=a_ , )
except Exception as e:
if "too short" in str(a_ ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
a__ : List[str] = self.tokenizer(
a_ , add_special_tokens=a_ , return_tensors=a_ , padding=a_ , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def UpperCAmelCase ( self : Tuple , **a_ : Tuple ) -> Optional[int]:
'''simple docstring'''
if kwargs.get("multi_class" , a_ ) is not None:
a__ : str = kwargs["multi_class"]
logger.warning(
"The `multi_class` argument has been deprecated and renamed to `multi_label`. "
"`multi_class` will be removed in a future version of Transformers." )
a__ : Tuple = {}
if "candidate_labels" in kwargs:
a__ : Any = self._args_parser._parse_labels(kwargs["candidate_labels"] )
if "hypothesis_template" in kwargs:
a__ : str = kwargs["hypothesis_template"]
a__ : Tuple = {}
if "multi_label" in kwargs:
a__ : Dict = kwargs["multi_label"]
return preprocess_params, {}, postprocess_params
def __call__( self : str , a_ : Union[str, List[str]] , *a_ : List[str] , **a_ : List[Any] , ) -> Tuple:
'''simple docstring'''
if len(a_ ) == 0:
pass
elif len(a_ ) == 1 and "candidate_labels" not in kwargs:
a__ : Any = args[0]
else:
raise ValueError(F"Unable to understand extra arguments {args}" )
return super().__call__(a_ , **a_ )
def UpperCAmelCase ( self : Optional[int] , a_ : Tuple , a_ : Any=None , a_ : Dict="This example is {}." ) -> Optional[int]:
'''simple docstring'''
a__ , a__ : Optional[Any] = self._args_parser(a_ , a_ , a_ )
for i, (candidate_label, sequence_pair) in enumerate(zip(a_ , a_ ) ):
a__ : Union[str, Any] = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(a_ ) - 1,
**model_input,
}
def UpperCAmelCase ( self : Optional[int] , a_ : Optional[Any] ) -> List[Any]:
'''simple docstring'''
a__ : Dict = inputs["candidate_label"]
a__ : Optional[int] = inputs["sequence"]
a__ : Optional[int] = {k: inputs[k] for k in self.tokenizer.model_input_names}
a__ : int = self.model(**a_ )
a__ : Optional[int] = {
"candidate_label": candidate_label,
"sequence": sequence,
"is_last": inputs["is_last"],
**outputs,
}
return model_outputs
def UpperCAmelCase ( self : Dict , a_ : Any , a_ : List[str]=False ) -> Union[str, Any]:
'''simple docstring'''
a__ : int = [outputs["candidate_label"] for outputs in model_outputs]
a__ : Optional[int] = [outputs["sequence"] for outputs in model_outputs]
a__ : Union[str, Any] = np.concatenate([output["logits"].numpy() for output in model_outputs] )
a__ : List[str] = logits.shape[0]
a__ : Optional[int] = len(a_ )
a__ : List[str] = N // n
a__ : int = logits.reshape((num_sequences, n, -1) )
if multi_label or len(a_ ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
a__ : str = self.entailment_id
a__ : str = -1 if entailment_id == 0 else 0
a__ : str = reshaped_outputs[..., [contradiction_id, entailment_id]]
a__ : List[Any] = np.exp(a_ ) / np.exp(a_ ).sum(-1 , keepdims=a_ )
a__ : str = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
a__ : str = reshaped_outputs[..., self.entailment_id]
a__ : Optional[int] = np.exp(a_ ) / np.exp(a_ ).sum(-1 , keepdims=a_ )
a__ : List[str] = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
| 642
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'edbeeching/decision-transformer-gym-hopper-medium': (
'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class __A ( UpperCamelCase_ ):
"""simple docstring"""
UpperCAmelCase__ = """decision_transformer"""
UpperCAmelCase__ = ["""past_key_values"""]
UpperCAmelCase__ = {
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , a__=17 , a__=4 , a__=128 , a__=4096 , a__=True , a__=1 , a__=1024 , a__=3 , a__=1 , a__=None , a__="relu" , a__=0.1 , a__=0.1 , a__=0.1 , a__=1e-5 , a__=0.02 , a__=True , a__=True , a__=5_0256 , a__=5_0256 , a__=False , a__=False , **a__ , ):
"""simple docstring"""
_lowerCamelCase : int = state_dim
_lowerCamelCase : Optional[int] = act_dim
_lowerCamelCase : Optional[Any] = hidden_size
_lowerCamelCase : Any = max_ep_len
_lowerCamelCase : List[str] = action_tanh
_lowerCamelCase : str = vocab_size
_lowerCamelCase : Tuple = n_positions
_lowerCamelCase : List[Any] = n_layer
_lowerCamelCase : str = n_head
_lowerCamelCase : Union[str, Any] = n_inner
_lowerCamelCase : List[str] = activation_function
_lowerCamelCase : int = resid_pdrop
_lowerCamelCase : Any = embd_pdrop
_lowerCamelCase : str = attn_pdrop
_lowerCamelCase : Tuple = layer_norm_epsilon
_lowerCamelCase : int = initializer_range
_lowerCamelCase : List[str] = scale_attn_weights
_lowerCamelCase : List[Any] = use_cache
_lowerCamelCase : List[Any] = scale_attn_by_inverse_layer_idx
_lowerCamelCase : Optional[int] = reorder_and_upcast_attn
_lowerCamelCase : Dict = bos_token_id
_lowerCamelCase : Tuple = eos_token_id
super().__init__(bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__)
| 712
|
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def __UpperCAmelCase( lowercase_ ): # picklable for multiprocessing
return x.sum()
def __UpperCAmelCase( lowercase_ ): # picklable for multiprocessing
return i + 1
@dataclass
class __A :
"""simple docstring"""
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
class __A ( lowerCamelCase__ ):
"""simple docstring"""
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Tuple = {}
_lowerCamelCase : Dict = []
_lowerCamelCase : Optional[Any] = 1
_lowerCamelCase : Optional[int] = [1, 2]
_lowerCamelCase : str = {'''a''': 1, '''b''': 2}
_lowerCamelCase : Dict = {'''a''': [1, 2], '''b''': [3, 4]}
_lowerCamelCase : Any = {'''a''': {'''1''': 1}, '''b''': 2}
_lowerCamelCase : Optional[Any] = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
_lowerCamelCase : str = {}
_lowerCamelCase : int = []
_lowerCamelCase : str = 2
_lowerCamelCase : int = [2, 3]
_lowerCamelCase : str = {'''a''': 2, '''b''': 3}
_lowerCamelCase : Tuple = {'''a''': [2, 3], '''b''': [4, 5]}
_lowerCamelCase : List[str] = {'''a''': {'''1''': 2}, '''b''': 3}
_lowerCamelCase : str = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
self.assertEqual(map_nested(a__ , a__) , a__)
self.assertEqual(map_nested(a__ , a__) , a__)
self.assertEqual(map_nested(a__ , a__) , a__)
self.assertEqual(map_nested(a__ , a__) , a__)
self.assertEqual(map_nested(a__ , a__) , a__)
self.assertEqual(map_nested(a__ , a__) , a__)
self.assertEqual(map_nested(a__ , a__) , a__)
self.assertEqual(map_nested(a__ , a__) , a__)
_lowerCamelCase : Dict = 2
self.assertEqual(map_nested(a__ , a__ , num_proc=a__) , a__)
self.assertEqual(map_nested(a__ , a__ , num_proc=a__) , a__)
self.assertEqual(map_nested(a__ , a__ , num_proc=a__) , a__)
self.assertEqual(map_nested(a__ , a__ , num_proc=a__) , a__)
self.assertEqual(map_nested(a__ , a__ , num_proc=a__) , a__)
self.assertEqual(map_nested(a__ , a__ , num_proc=a__) , a__)
self.assertEqual(map_nested(a__ , a__ , num_proc=a__) , a__)
self.assertEqual(map_nested(a__ , a__ , num_proc=a__) , a__)
_lowerCamelCase : Any = {'''a''': np.eye(2), '''b''': np.zeros(3), '''c''': np.ones(2)}
_lowerCamelCase : Optional[int] = {'''a''': 2, '''b''': 0, '''c''': 2}
_lowerCamelCase : Optional[int] = {
'''a''': np.eye(2).astype(a__),
'''b''': np.zeros(3).astype(a__),
'''c''': np.ones(2).astype(a__),
}
self.assertEqual(map_nested(a__ , a__ , map_numpy=a__) , a__)
self.assertEqual(
{k: v.tolist() for k, v in map_nested(a__ , a__ , map_numpy=a__).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(a__ , a__ , map_numpy=a__ , num_proc=a__) , a__)
self.assertEqual(
{k: v.tolist() for k, v in map_nested(a__ , a__ , map_numpy=a__ , num_proc=a__).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(a__): # can't pickle a local lambda
map_nested(lambda a__: x + 1 , a__ , num_proc=a__)
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Dict = {'''a''': 1, '''b''': 2}
_lowerCamelCase : Optional[int] = {'''a''': 3, '''b''': 4}
_lowerCamelCase : int = {'''a''': 5, '''b''': 6}
_lowerCamelCase : Optional[int] = sorted([('''a''', (1, 3, 5)), ('''b''', (2, 4, 6))])
self.assertEqual(sorted(zip_dict(a__ , a__ , a__)) , a__)
def __snake_case ( self):
"""simple docstring"""
class __A :
"""simple docstring"""
UpperCAmelCase__ = """bar"""
_lowerCamelCase : Any = Foo()
self.assertEqual(foo.my_attr , '''bar''')
with temporary_assignment(a__ , '''my_attr''' , '''BAR'''):
self.assertEqual(foo.my_attr , '''BAR''')
self.assertEqual(foo.my_attr , '''bar''')
@pytest.mark.parametrize(
'''iterable_length, num_proc, expected_num_proc''' , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def __UpperCAmelCase( lowercase_ , lowercase_ , lowercase_ ):
with patch('''datasets.utils.py_utils._single_map_nested''' ) as mock_single_map_nested, patch(
'''datasets.parallel.parallel.Pool''' ) as mock_multiprocessing_pool:
_lowerCamelCase : Union[str, Any] = {F"""{i}""": i for i in range(lowercase_ )}
_lowerCamelCase : List[str] = map_nested(lambda lowercase_ : x + 10 , lowercase_ , num_proc=lowercase_ , parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class __A ( lowerCamelCase__ ):
"""simple docstring"""
@require_tf
def __snake_case ( self):
"""simple docstring"""
import tensorflow as tf
from tensorflow.keras import layers
_lowerCamelCase : int = layers.Dense(2)
def gen_random_output():
_lowerCamelCase : Union[str, Any] = tf.random.uniform((1, 3))
return model(a__).numpy()
with temp_seed(42 , set_tensorflow=a__):
_lowerCamelCase : List[str] = gen_random_output()
with temp_seed(42 , set_tensorflow=a__):
_lowerCamelCase : Any = gen_random_output()
_lowerCamelCase : str = gen_random_output()
np.testing.assert_equal(a__ , a__)
self.assertGreater(np.abs(outa - outa).sum() , 0)
@require_torch
def __snake_case ( self):
"""simple docstring"""
import torch
def gen_random_output():
_lowerCamelCase : Union[str, Any] = torch.nn.Linear(3 , 2)
_lowerCamelCase : Dict = torch.rand(1 , 3)
return model(a__).detach().numpy()
with temp_seed(42 , set_pytorch=a__):
_lowerCamelCase : Any = gen_random_output()
with temp_seed(42 , set_pytorch=a__):
_lowerCamelCase : Optional[int] = gen_random_output()
_lowerCamelCase : Union[str, Any] = gen_random_output()
np.testing.assert_equal(a__ , a__)
self.assertGreater(np.abs(outa - outa).sum() , 0)
def __snake_case ( self):
"""simple docstring"""
def gen_random_output():
return np.random.rand(1 , 3)
with temp_seed(42):
_lowerCamelCase : Union[str, Any] = gen_random_output()
with temp_seed(42):
_lowerCamelCase : List[str] = gen_random_output()
_lowerCamelCase : str = gen_random_output()
np.testing.assert_equal(a__ , a__)
self.assertGreater(np.abs(outa - outa).sum() , 0)
@pytest.mark.parametrize('''input_data''' , [{}] )
def __UpperCAmelCase( lowercase_ ):
_lowerCamelCase : List[Any] = NestedDataStructure(lowercase_ ).data
assert output_data == input_data
@pytest.mark.parametrize(
'''data, expected_output''' , [
({}, []),
([], []),
('''foo''', ['''foo''']),
(['''foo''', '''bar'''], ['''foo''', '''bar''']),
([['''foo''', '''bar''']], ['''foo''', '''bar''']),
([[['''foo'''], ['''bar''']]], ['''foo''', '''bar''']),
([[['''foo'''], '''bar''']], ['''foo''', '''bar''']),
({'''a''': 1, '''b''': 2}, [1, 2]),
({'''a''': [1, 2], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[[3], [4]]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, [4]]}, [1, 2, 3, 4]),
({'''a''': {'''1''': 1}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': [2]}, [1, 2]),
] , )
def __UpperCAmelCase( lowercase_ , lowercase_ ):
_lowerCamelCase : int = NestedDataStructure(lowercase_ ).flatten()
assert output == expected_output
def __UpperCAmelCase( ):
_lowerCamelCase : Any = A(x=1 , y='''foobar''' )
_lowerCamelCase : Union[str, Any] = {'''x''': 1, '''y''': '''foobar'''}
assert asdict(lowercase_ ) == expected_output
_lowerCamelCase : Optional[int] = {'''a''': {'''b''': A(x=10 , y='''foo''' )}, '''c''': [A(x=20 , y='''bar''' )]}
_lowerCamelCase : Union[str, Any] = {'''a''': {'''b''': {'''x''': 10, '''y''': '''foo'''}}, '''c''': [{'''x''': 20, '''y''': '''bar'''}]}
assert asdict(lowercase_ ) == expected_output
with pytest.raises(lowercase_ ):
asdict([1, A(x=10 , y='''foo''' )] )
def __UpperCAmelCase( lowercase_ ):
return text.split()
def __UpperCAmelCase( lowercase_ ):
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def __UpperCAmelCase( ):
with Pool(2 ) as pool:
_lowerCamelCase : Tuple = list(iflatmap_unordered(lowercase_ , _split_text , kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) )
assert out.count('''hello''' ) == 10
assert out.count('''there''' ) == 10
assert len(lowercase_ ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
_lowerCamelCase : Dict = list(iflatmap_unordered(lowercase_ , _split_text , kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) )
assert out.count('''hello''' ) == 10
assert out.count('''there''' ) == 10
assert len(lowercase_ ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
_lowerCamelCase : str = []
for yield_time, content in iflatmap_unordered(
lowercase_ , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{'''content''': '''a'''}, {'''content''': '''b'''}] ):
assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
out.append(lowercase_ )
assert out.count('''a''' ) == 2
assert out.count('''b''' ) == 2
assert len(lowercase_ ) == 4
| 613
| 0
|
'''simple docstring'''
from __future__ import annotations
def __UpperCamelCase ( lowercase__ : Dict, lowercase__ : int ):
'''simple docstring'''
__lowercase =sorted(numsa + numsa )
__lowercase , __lowercase =divmod(len(UpperCamelCase__ ), 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase = [float(x) for x in input('''Enter the elements of first array: ''').split()]
UpperCAmelCase = [float(x) for x in input('''Enter the elements of second array: ''').split()]
print(F'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''')
| 119
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__A : Tuple = logging.get_logger(__name__)
__A : int = {
"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class A_ (a_ , a_ ):
UpperCAmelCase__ = '''focalnet'''
def __init__( self , _A=2_2_4 , _A=4 , _A=3 , _A=9_6 , _A=False , _A=[1_9_2, 3_8_4, 7_6_8, 7_6_8] , _A=[2, 2, 6, 2] , _A=[2, 2, 2, 2] , _A=[3, 3, 3, 3] , _A="gelu" , _A=4.0 , _A=0.0 , _A=0.1 , _A=False , _A=1E-4 , _A=False , _A=False , _A=False , _A=0.02 , _A=1E-5 , _A=3_2 , _A=None , _A=None , **_A , ):
'''simple docstring'''
super().__init__(**_A )
UpperCAmelCase = image_size
UpperCAmelCase = patch_size
UpperCAmelCase = num_channels
UpperCAmelCase = embed_dim
UpperCAmelCase = use_conv_embed
UpperCAmelCase = hidden_sizes
UpperCAmelCase = depths
UpperCAmelCase = focal_levels
UpperCAmelCase = focal_windows
UpperCAmelCase = hidden_act
UpperCAmelCase = mlp_ratio
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = drop_path_rate
UpperCAmelCase = use_layerscale
UpperCAmelCase = layerscale_value
UpperCAmelCase = use_post_layernorm
UpperCAmelCase = use_post_layernorm_in_modulation
UpperCAmelCase = normalize_modulator
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = encoder_stride
UpperCAmelCase = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
UpperCAmelCase , UpperCAmelCase = get_aligned_output_features_output_indices(
out_features=_A , out_indices=_A , stage_names=self.stage_names )
| 130
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase_ : Tuple = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Optional[Any] = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : List[str] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
lowerCAmelCase_ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 156
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase_ : List[str] = {
'''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''],
'''processing_layoutlmv2''': ['''LayoutLMv2Processor'''],
'''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Optional[Any] = ['''LayoutLMv2TokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Dict = ['''LayoutLMv2FeatureExtractor''']
lowerCAmelCase_ : str = ['''LayoutLMv2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Any = [
'''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv2ForQuestionAnswering''',
'''LayoutLMv2ForSequenceClassification''',
'''LayoutLMv2ForTokenClassification''',
'''LayoutLMv2Layer''',
'''LayoutLMv2Model''',
'''LayoutLMv2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 156
| 1
|
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class snake_case__ :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=99 , lowerCAmelCase__=64 , lowerCAmelCase__=32 , lowerCAmelCase__=5 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=5_12 , lowerCAmelCase__=16 , lowerCAmelCase__=2 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=None , ) -> Any:
__magic_name__ : int = parent
__magic_name__ : str = batch_size
__magic_name__ : Union[str, Any] = seq_length
__magic_name__ : Any = is_training
__magic_name__ : Dict = use_input_mask
__magic_name__ : List[str] = use_token_type_ids
__magic_name__ : Tuple = use_labels
__magic_name__ : Optional[int] = vocab_size
__magic_name__ : str = hidden_size
__magic_name__ : List[str] = embedding_size
__magic_name__ : Optional[int] = num_hidden_layers
__magic_name__ : Optional[int] = num_attention_heads
__magic_name__ : List[str] = intermediate_size
__magic_name__ : int = hidden_act
__magic_name__ : int = hidden_dropout_prob
__magic_name__ : List[Any] = attention_probs_dropout_prob
__magic_name__ : Dict = max_position_embeddings
__magic_name__ : Optional[Any] = type_vocab_size
__magic_name__ : Optional[int] = type_sequence_label_size
__magic_name__ : Tuple = initializer_range
__magic_name__ : str = num_labels
__magic_name__ : Tuple = num_choices
__magic_name__ : Union[str, Any] = scope
def __magic_name__ ( self ) -> Any:
__magic_name__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ : int = None
if self.use_input_mask:
__magic_name__ : int = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ : str = None
if self.use_token_type_ids:
__magic_name__ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ : Dict = None
__magic_name__ : List[str] = None
__magic_name__ : str = None
if self.use_labels:
__magic_name__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ : Any = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __magic_name__ ( self ) -> List[str]:
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
__magic_name__ : Optional[Any] = MegatronBertModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : Tuple = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
__magic_name__ : int = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
__magic_name__ : List[str] = MegatronBertForMaskedLM(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : str = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
__magic_name__ : Tuple = MegatronBertForCausalLM(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : List[str] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
__magic_name__ : Optional[Any] = MegatronBertForNextSentencePrediction(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : int = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
__magic_name__ : str = MegatronBertForPreTraining(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : Union[str, Any] = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , next_sentence_label=lowerCAmelCase__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
__magic_name__ : Dict = MegatronBertForQuestionAnswering(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : List[Any] = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , start_positions=lowerCAmelCase__ , end_positions=lowerCAmelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
__magic_name__ : Any = self.num_labels
__magic_name__ : Optional[int] = MegatronBertForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : Optional[int] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
__magic_name__ : Union[str, Any] = self.num_labels
__magic_name__ : int = MegatronBertForTokenClassification(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : Optional[int] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
__magic_name__ : Optional[Any] = self.num_choices
__magic_name__ : Tuple = MegatronBertForMultipleChoice(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__magic_name__ : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__magic_name__ : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__magic_name__ : Tuple = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __magic_name__ ( self ) -> Optional[int]:
__magic_name__ : Any = self.prepare_config_and_inputs()
(
(
__magic_name__
) ,(
__magic_name__
) ,(
__magic_name__
) ,(
__magic_name__
) ,(
__magic_name__
) ,(
__magic_name__
) ,(
__magic_name__
) ,
) : Tuple = config_and_inputs
__magic_name__ : Optional[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class snake_case__ ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
lowercase__ : Tuple = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
lowercase__ : Tuple = (
{
'''feature-extraction''': MegatronBertModel,
'''fill-mask''': MegatronBertForMaskedLM,
'''question-answering''': MegatronBertForQuestionAnswering,
'''text-classification''': MegatronBertForSequenceClassification,
'''text-generation''': MegatronBertForCausalLM,
'''token-classification''': MegatronBertForTokenClassification,
'''zero-shot''': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase__ : Optional[int] = True
# test_resize_embeddings = False
lowercase__ : Tuple = False
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ) -> List[Any]:
__magic_name__ : int = super()._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
if return_labels:
if model_class in get_values(lowerCAmelCase__ ):
__magic_name__ : Tuple = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowerCAmelCase__ )
__magic_name__ : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
return inputs_dict
def __magic_name__ ( self ) -> Tuple:
__magic_name__ : Dict = MegatronBertModelTester(self )
__magic_name__ : Optional[Any] = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=37 )
def __magic_name__ ( self ) -> List[str]:
self.config_tester.run_common_tests()
def __magic_name__ ( self ) -> Optional[int]:
__magic_name__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*lowerCAmelCase__ )
def __magic_name__ ( self ) -> List[str]:
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*lowerCAmelCase__ )
def __magic_name__ ( self ) -> Optional[int]:
__magic_name__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*lowerCAmelCase__ )
def __magic_name__ ( self ) -> str:
__magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*lowerCAmelCase__ )
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*lowerCAmelCase__ )
def __magic_name__ ( self ) -> int:
__magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*lowerCAmelCase__ )
def __magic_name__ ( self ) -> Optional[Any]:
__magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*lowerCAmelCase__ )
def __magic_name__ ( self ) -> List[str]:
__magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*lowerCAmelCase__ )
def UpperCamelCase ( _A ):
"""simple docstring"""
return torch.tensor(
_A, dtype=torch.long, device=_A, )
__magic_name__: str = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case__ ( unittest.TestCase ):
@slow
@unittest.skip("""Model is not available.""" )
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ : Tuple = """nvidia/megatron-bert-uncased-345m"""
if "MYDIR" in os.environ:
__magic_name__ : List[Any] = os.path.join(os.environ["""MYDIR"""] , lowerCAmelCase__ )
__magic_name__ : Any = MegatronBertModel.from_pretrained(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.half()
__magic_name__ : Tuple = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] )
with torch.no_grad():
__magic_name__ : Dict = model(lowerCAmelCase__ )[0]
__magic_name__ : Union[str, Any] = torch.Size((1, 9, 10_24) )
self.assertEqual(output.shape , lowerCAmelCase__ )
__magic_name__ : str = [-0.6_0_4_0, -0.2_5_1_7, -0.1_0_2_5, 0.3_4_2_0, -0.6_7_5_8, -0.0_0_1_7, -0.1_0_8_9, -0.1_9_9_0, 0.5_7_2_8]
for ii in range(3 ):
for jj in range(3 ):
__magic_name__ : Optional[Any] = output[0, ii, jj]
__magic_name__ : Any = expected[3 * ii + jj]
__magic_name__ : Union[str, Any] = """ii={} jj={} a={} b={}""".format(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
self.assertTrue(math.isclose(lowerCAmelCase__ , lowerCAmelCase__ , rel_tol=lowerCAmelCase__ , abs_tol=lowerCAmelCase__ ) , msg=lowerCAmelCase__ )
| 324
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 324
| 1
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''ClapFeatureExtractor'''
__lowerCAmelCase = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
def __init__( self , _UpperCAmelCase , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def __call__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase ):
__a : int = kwargs.pop('''sampling_rate''' , _UpperCAmelCase )
if text is None and audios is None:
raise ValueError('''You have to specify either text or audios. Both cannot be none.''' )
if text is not None:
__a : Union[str, Any] = self.tokenizer(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if audios is not None:
__a : Union[str, Any] = self.feature_extractor(
_UpperCAmelCase , sampling_rate=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if text is not None and audios is not None:
__a : int = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_UpperCAmelCase ) , tensor_type=_UpperCAmelCase )
def _lowerCamelCase ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def _lowerCamelCase ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
def _lowerCamelCase ( self ):
__a : int = self.tokenizer.model_input_names
__a : Dict = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
| 101
|
"""simple docstring"""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
A = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=768 ):
super().__init__(_UpperCAmelCase )
__a : str = proj_size
__a : Optional[Any] = CLIPVisionModel(_UpperCAmelCase )
__a : List[Any] = PaintByExampleMapper(_UpperCAmelCase )
__a : int = nn.LayerNorm(config.hidden_size )
__a : List[Any] = nn.Linear(config.hidden_size , self.proj_size )
# uncondition for scaling
__a : int = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase=False ):
__a : str = self.model(pixel_values=_UpperCAmelCase )
__a : Union[str, Any] = clip_output.pooler_output
__a : Optional[int] = self.mapper(latent_states[:, None] )
__a : int = self.final_layer_norm(_UpperCAmelCase )
__a : Optional[Any] = self.proj_out(_UpperCAmelCase )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class __lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase ):
super().__init__()
__a : List[str] = (config.num_hidden_layers + 1) // 5
__a : Optional[Any] = config.hidden_size
__a : str = 1
__a : Union[str, Any] = nn.ModuleList(
[
BasicTransformerBlock(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , activation_fn='''gelu''' , attention_bias=_UpperCAmelCase )
for _ in range(_UpperCAmelCase )
] )
def _lowerCamelCase ( self , _UpperCAmelCase ):
for block in self.blocks:
__a : Union[str, Any] = block(_UpperCAmelCase )
return hidden_states
| 101
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : str = logging.get_logger(__name__)
a_ : Any = {
'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json',
}
class _snake_case ( A__ ):
_lowercase : List[Any] = '''lxmert'''
_lowercase : Any = {}
def __init__( self , a=3_0522 , a=768 , a=12 , a=9500 , a=1600 , a=400 , a=3072 , a="gelu" , a=0.1 , a=0.1 , a=512 , a=2 , a=0.02 , a=1E-12 , a=9 , a=5 , a=5 , a=2048 , a=4 , a=6.67 , a=True , a=True , a=True , a=True , a=True , a=True , a=True , **a , ) -> Optional[int]:
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = num_qa_labels
SCREAMING_SNAKE_CASE = num_object_labels
SCREAMING_SNAKE_CASE = num_attr_labels
SCREAMING_SNAKE_CASE = l_layers
SCREAMING_SNAKE_CASE = x_layers
SCREAMING_SNAKE_CASE = r_layers
SCREAMING_SNAKE_CASE = visual_feat_dim
SCREAMING_SNAKE_CASE = visual_pos_dim
SCREAMING_SNAKE_CASE = visual_loss_normalizer
SCREAMING_SNAKE_CASE = task_matched
SCREAMING_SNAKE_CASE = task_mask_lm
SCREAMING_SNAKE_CASE = task_obj_predict
SCREAMING_SNAKE_CASE = task_qa
SCREAMING_SNAKE_CASE = visual_obj_loss
SCREAMING_SNAKE_CASE = visual_attr_loss
SCREAMING_SNAKE_CASE = visual_feat_loss
SCREAMING_SNAKE_CASE = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers}
super().__init__(**a)
| 73
|
'''simple docstring'''
from collections import namedtuple
import requests
from lxml import html # type: ignore
UpperCAmelCase_ = namedtuple('covid_data', 'cases deaths recovered')
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str = "https://www.worldometers.info/coronavirus/" ):
'''simple docstring'''
UpperCAmelCase__ = """//div[@class = \"maincounter-number\"]/span/text()"""
return covid_data(*html.fromstring(requests.get(SCREAMING_SNAKE_CASE__ ).content ).xpath(SCREAMING_SNAKE_CASE__ ) )
UpperCAmelCase_ = 'Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}'
print(fmt.format(*covid_stats()))
| 603
| 0
|
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
__a : List[Any] = """3"""
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
| 522
|
from __future__ import annotations
def UpperCAmelCase ( lowercase , lowercase ):
"""simple docstring"""
if b == 0:
return (1, 0)
((__lowercase) , (__lowercase)) = extended_euclid(lowercase , a % b )
__lowercase = a // b
return (y, x - k * y)
def UpperCAmelCase ( lowercase , lowercase , lowercase , lowercase ):
"""simple docstring"""
((__lowercase) , (__lowercase)) = extended_euclid(lowercase , lowercase )
__lowercase = na * na
__lowercase = ra * x * na + ra * y * na
return (n % m + m) % m
def UpperCAmelCase ( lowercase , lowercase ):
"""simple docstring"""
((__lowercase) , (__lowercase)) = extended_euclid(lowercase , lowercase )
if b < 0:
__lowercase = (b % n + n) % n
return b
def UpperCAmelCase ( lowercase , lowercase , lowercase , lowercase ):
"""simple docstring"""
__lowercase , __lowercase = invert_modulo(lowercase , lowercase ), invert_modulo(lowercase , lowercase )
__lowercase = na * na
__lowercase = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="""chinese_remainder_theorem""", verbose=True)
testmod(name="""chinese_remainder_theorem2""", verbose=True)
testmod(name="""invert_modulo""", verbose=True)
testmod(name="""extended_euclid""", verbose=True)
| 522
| 1
|
'''simple docstring'''
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('socket.socket' )
@patch('builtins.open' )
def a_ ( lowerCamelCase : List[Any] , lowerCamelCase : Tuple ):
# ===== initialization =====
lowerCAmelCase = Mock()
lowerCAmelCase = conn, Mock()
lowerCAmelCase = iter([1, None] )
lowerCAmelCase = lambda lowerCamelCase : next(lowerCamelCase )
# ===== invoke =====
send_file(filename='mytext.txt' , testing=lowerCamelCase )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 133
|
'''simple docstring'''
def a_ ( lowerCamelCase : float , lowerCamelCase : float ):
if density <= 0:
raise ValueError('Impossible fluid density' )
if bulk_modulus <= 0:
raise ValueError('Impossible bulk modulus' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 133
| 1
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _UpperCamelCase ( metaclass=_A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE:Tuple = ['speech']
def __init__( self , *_a , **_a ):
"""simple docstring"""
requires_backends(self , ['speech'] )
class _UpperCamelCase ( metaclass=_A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE:Union[str, Any] = ['speech']
def __init__( self , *_a , **_a ):
"""simple docstring"""
requires_backends(self , ['speech'] )
| 126
|
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class _UpperCamelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , _a , _a , _a = None , _a = None ):
"""simple docstring"""
super().__init__()
a__ = pad_token_id
a__ = max_length
a__ = vocab
a__ = merges
a__ = BytePairTokenizer(_a , _a , sequence_length=_a )
@classmethod
def lowercase__ ( cls , _a , *_a , **_a ):
"""simple docstring"""
a__ = [' '.join(_a ) for m in tokenizer.bpe_ranks.keys()]
a__ = tokenizer.get_vocab()
return cls(_a , _a , *_a , **_a )
@classmethod
def lowercase__ ( cls , _a , *_a , **_a ):
"""simple docstring"""
a__ = GPTaTokenizer.from_pretrained(_a , *_a , **_a )
return cls.from_tokenizer(_a , *_a , **_a )
@classmethod
def lowercase__ ( cls , _a ):
"""simple docstring"""
return cls(**_a )
def lowercase__ ( self ):
"""simple docstring"""
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def lowercase__ ( self , _a , _a = None ):
"""simple docstring"""
a__ = self.tf_tokenizer(_a )
a__ = tf.ones_like(_a )
if self.pad_token_id is not None:
# pad the tokens up to max length
a__ = max_length if max_length is not None else self.max_length
if max_length is not None:
a__ , a__ = pad_model_inputs(
_a , max_seq_length=_a , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 126
| 1
|
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def __UpperCAmelCase ( lowerCamelCase_ : List[str] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for line in lines:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = re.sub(R'#.*' , '' , lowerCamelCase_ ) # remove comments
if line:
filtered_lines.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[Any] = '\n'.join(lowerCamelCase_ )
# Make a hash from all this code
SCREAMING_SNAKE_CASE_ : Tuple = full_str.encode('utf-8' )
return shaaaa(lowerCamelCase_ ).hexdigest()
# get importable module names and hash for caching
UpperCamelCase__ : Optional[Any] = {
'''csv''': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'''json''': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'''pandas''': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'''parquet''': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'''arrow''': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'''text''': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'''imagefolder''': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'''audiofolder''': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
UpperCamelCase__ : Tuple = {
'''.csv''': ('''csv''', {}),
'''.tsv''': ('''csv''', {'''sep''': '''\t'''}),
'''.json''': ('''json''', {}),
'''.jsonl''': ('''json''', {}),
'''.parquet''': ('''parquet''', {}),
'''.arrow''': ('''arrow''', {}),
'''.txt''': ('''text''', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
UpperCamelCase__ : Tuple = {'''imagefolder''', '''audiofolder'''}
# Used to filter data files based on extensions given a module name
UpperCamelCase__ : Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('''.zip''')
_MODULE_TO_EXTENSIONS["audiofolder"].append('''.zip''')
| 105
|
"""simple docstring"""
from math import factorial
UpperCAmelCase : Tuple = {str(d): factorial(d) for d in range(10)}
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> int:
'''simple docstring'''
return sum(DIGIT_FACTORIAL[d] for d in str(__lowerCAmelCase ) )
def _SCREAMING_SNAKE_CASE () -> int:
'''simple docstring'''
lowercase_ = 7 * factorial(9 ) + 1
return sum(i for i in range(3 , __lowerCAmelCase ) if sum_of_digit_factorial(__lowerCAmelCase ) == i )
if __name__ == "__main__":
print(F"{solution() = }")
| 567
| 0
|
def a__ ( SCREAMING_SNAKE_CASE : list , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : int = 0 ):
'''simple docstring'''
lowerCAmelCase : List[Any] = right or len(__A ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(__A , __A , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 708
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase__ = {
'''configuration_efficientformer''': [
'''EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientFormerConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''EfficientFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientFormerForImageClassification''',
'''EfficientFormerForImageClassificationWithTeacher''',
'''EfficientFormerModel''',
'''EfficientFormerPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFEfficientFormerForImageClassification''',
'''TFEfficientFormerForImageClassificationWithTeacher''',
'''TFEfficientFormerModel''',
'''TFEfficientFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 681
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowercase ( A__ , A__ , A__ , unittest.TestCase ):
"""simple docstring"""
_a = StableDiffusionInstructPixaPixPipeline
_a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'}
_a = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_a = IMAGE_TO_IMAGE_IMAGE_PARAMS
_a = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCAmelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase__ :Optional[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
UpperCamelCase__ :List[str] = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
torch.manual_seed(0 )
UpperCamelCase__ :Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCamelCase__ :int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
UpperCamelCase__ :Tuple = CLIPTextModel(UpperCamelCase_ )
UpperCamelCase__ :str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCamelCase__ :Dict = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=0 ):
'''simple docstring'''
UpperCamelCase__ :int = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCamelCase__ :List[Any] = Image.fromarray(np.uinta(UpperCamelCase_ ) ).convert('''RGB''' )
if str(UpperCamelCase_ ).startswith('''mps''' ):
UpperCamelCase__ :Tuple = torch.manual_seed(UpperCamelCase_ )
else:
UpperCamelCase__ :List[Any] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
UpperCamelCase__ :List[Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''image_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :str = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ :str = self.get_dummy_components()
UpperCamelCase__ :Optional[Any] = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase_ )
UpperCamelCase__ :List[str] = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCamelCase__ :List[str] = self.get_dummy_inputs(UpperCamelCase_ )
UpperCamelCase__ :Dict = sd_pipe(**UpperCamelCase_ ).images
UpperCamelCase__ :str = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCamelCase__ :Union[str, Any] = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ :Optional[Any] = self.get_dummy_components()
UpperCamelCase__ :Any = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase_ )
UpperCamelCase__ :Tuple = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCamelCase__ :Tuple = self.get_dummy_inputs(UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = '''french fries'''
UpperCamelCase__ :Any = sd_pipe(**UpperCamelCase_ , negative_prompt=UpperCamelCase_ )
UpperCamelCase__ :List[Any] = output.images
UpperCamelCase__ :Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCamelCase__ :List[str] = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :str = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ :Dict = self.get_dummy_components()
UpperCamelCase__ :str = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase_ )
UpperCamelCase__ :Any = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCamelCase__ :Dict = self.get_dummy_inputs(UpperCamelCase_ )
UpperCamelCase__ :str = [inputs['''prompt''']] * 2
UpperCamelCase__ :str = np.array(inputs['''image'''] ).astype(np.floataa ) / 255.0
UpperCamelCase__ :int = torch.from_numpy(UpperCamelCase_ ).unsqueeze(0 ).to(UpperCamelCase_ )
UpperCamelCase__ :List[Any] = image / 2 + 0.5
UpperCamelCase__ :Union[str, Any] = image.permute(0 , 3 , 1 , 2 )
UpperCamelCase__ :Union[str, Any] = image.repeat(2 , 1 , 1 , 1 )
UpperCamelCase__ :Optional[Any] = sd_pipe(**UpperCamelCase_ ).images
UpperCamelCase__ :Any = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
UpperCamelCase__ :List[Any] = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ :Tuple = self.get_dummy_components()
UpperCamelCase__ :Union[str, Any] = EulerAncestralDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' )
UpperCamelCase__ :Optional[int] = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase_ )
UpperCamelCase__ :List[Any] = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCamelCase__ :str = self.get_dummy_inputs(UpperCamelCase_ )
UpperCamelCase__ :str = sd_pipe(**UpperCamelCase_ ).images
UpperCamelCase__ :Tuple = image[0, -3:, -3:, -1]
UpperCamelCase__ :Dict = [round(UpperCamelCase_ , 4 ) for x in image_slice.flatten().tolist()]
print(''','''.join([str(UpperCamelCase_ ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
UpperCamelCase__ :Union[str, Any] = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCAmelCase__ ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = self.get_dummy_components()
UpperCamelCase__ :Optional[Any] = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase_ )
UpperCamelCase__ :List[Any] = VaeImageProcessor(do_resize=UpperCamelCase_ , do_normalize=UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = pipe(**self.get_dummy_inputs_by_type(UpperCamelCase_ , input_image_type='''pt''' ) )[0]
UpperCamelCase__ :List[Any] = components['''vae''']
UpperCamelCase__ :Optional[Any] = self.get_dummy_inputs_by_type(UpperCamelCase_ , input_image_type='''pt''' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
UpperCamelCase__ :List[str] = vae.encode(inputs[image_param] ).latent_dist.mode()
UpperCamelCase__ :str = pipe(**UpperCamelCase_ )[0]
UpperCamelCase__ :Optional[int] = np.abs(out - out_latents_inputs ).max()
self.assertLess(UpperCamelCase_ , 1e-4 , '''passing latents as image input generate different result from passing image''' )
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self , UpperCamelCase_=0 ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = torch.manual_seed(UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = load_image(
'''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''' )
UpperCamelCase__ :Dict = {
'''prompt''': '''turn him into a cyborg''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''image_guidance_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
UpperCamelCase__ :Optional[int] = self.get_inputs()
UpperCamelCase__ :str = pipe(**UpperCamelCase_ ).images
UpperCamelCase__ :Optional[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
UpperCamelCase__ :str = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=UpperCamelCase_ )
UpperCamelCase__ :Dict = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
UpperCamelCase__ :int = self.get_inputs()
UpperCamelCase__ :List[str] = pipe(**UpperCamelCase_ ).images
UpperCamelCase__ :str = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
UpperCamelCase__ :Optional[int] = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=UpperCamelCase_ )
UpperCamelCase__ :str = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
UpperCamelCase__ :Union[str, Any] = self.get_inputs()
UpperCamelCase__ :Dict = pipe(**UpperCamelCase_ ).images
UpperCamelCase__ :Optional[int] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
UpperCamelCase__ :Tuple = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :str = 0
def callback_fn(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> None:
UpperCamelCase__ :Optional[Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
UpperCamelCase__ :Union[str, Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
UpperCamelCase__ :Optional[Any] = latents[0, -3:, -3:, -1]
UpperCamelCase__ :List[str] = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
UpperCamelCase__ :Union[str, Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
UpperCamelCase__ :Union[str, Any] = latents[0, -3:, -3:, -1]
UpperCamelCase__ :List[Any] = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
UpperCamelCase__ :Union[str, Any] = False
UpperCamelCase__ :Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=UpperCamelCase_ , torch_dtype=torch.floataa )
UpperCamelCase__ :Dict = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
UpperCamelCase__ :Dict = self.get_inputs()
pipe(**UpperCamelCase_ , callback=UpperCamelCase_ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowerCAmelCase__ ( self ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCamelCase__ :Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=UpperCamelCase_ , torch_dtype=torch.floataa )
UpperCamelCase__ :List[str] = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCamelCase__ :Any = self.get_inputs()
UpperCamelCase__ :Tuple = pipe(**UpperCamelCase_ )
UpperCamelCase__ :str = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
UpperCamelCase__ :int = inputs['''image'''].resize((504, 504) )
UpperCamelCase__ :Union[str, Any] = '''timbrooks/instruct-pix2pix'''
UpperCamelCase__ :str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
UpperCamelCase_ , safety_checker=UpperCamelCase_ , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
UpperCamelCase__ :List[str] = pipe(**UpperCamelCase_ )
UpperCamelCase__ :str = output.images[0]
UpperCamelCase__ :str = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
UpperCamelCase__ :Dict = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
| 189
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase ( A__ ):
"""simple docstring"""
_a = ['image_processor', 'tokenizer']
_a = 'ChineseCLIPImageProcessor'
_a = ('BertTokenizer', 'BertTokenizerFast')
def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , **UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , UpperCamelCase_ , )
UpperCamelCase__ :List[Any] = kwargs.pop('''feature_extractor''' )
UpperCamelCase__ :List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :Any = self.image_processor
def __call__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , **UpperCamelCase_ ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
UpperCamelCase__ :Tuple = self.tokenizer(UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ )
if images is not None:
UpperCamelCase__ :Any = self.image_processor(UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ )
if text is not None and images is not None:
UpperCamelCase__ :Optional[int] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase_ ) , tensor_type=UpperCamelCase_ )
def lowerCAmelCase__ ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase_ , **UpperCamelCase_ )
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :int = self.tokenizer.model_input_names
UpperCamelCase__ :Any = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , UpperCamelCase_ , )
return self.image_processor_class
| 189
| 1
|
import numpy as np
import datasets
__a : List[str] = """
Compute the Mahalanobis Distance
Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
"""
__a : Dict = """\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
"""
__a : List[str] = """
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{'mahalanobis': array([0.5])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCAmelCase ( datasets.Metric ):
"""simple docstring"""
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ),
} ) , )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
# convert to numpy arrays
UpperCamelCase = np.array(SCREAMING_SNAKE_CASE )
UpperCamelCase = np.array(SCREAMING_SNAKE_CASE )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError("Expected `X` to be a 2D vector" )
if len(reference_distribution.shape ) != 2:
raise ValueError("Expected `reference_distribution` to be a 2D vector" )
if reference_distribution.shape[0] < 2:
raise ValueError(
"Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" )
# Get mahalanobis distance for each prediction
UpperCamelCase = X - np.mean(SCREAMING_SNAKE_CASE )
UpperCamelCase = np.cov(reference_distribution.T )
try:
UpperCamelCase = np.linalg.inv(SCREAMING_SNAKE_CASE )
except np.linalg.LinAlgError:
UpperCamelCase = np.linalg.pinv(SCREAMING_SNAKE_CASE )
UpperCamelCase = np.dot(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase = np.dot(SCREAMING_SNAKE_CASE , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
| 414
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __UpperCAmelCase ( snake_case__ ):
"""simple docstring"""
lowercase = """naver-clova-ix/donut-base-finetuned-docvqa"""
lowercase = (
"""This is a tool that answers a question about an document (pdf). It takes an input named `document` which """
"""should be the document containing the information, as well as a `question` that is the question about the """
"""document. It returns a text that contains the answer to the question."""
)
lowercase = """document_qa"""
lowercase = AutoProcessor
lowercase = VisionEncoderDecoderModel
lowercase = ["""image""", """text"""]
lowercase = ["""text"""]
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
if not is_vision_available():
raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool." )
super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
UpperCamelCase = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
UpperCamelCase = task_prompt.replace("{user_input}" , SCREAMING_SNAKE_CASE )
UpperCamelCase = self.pre_processor.tokenizer(
SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , return_tensors="pt" ).input_ids
UpperCamelCase = self.pre_processor(SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
return self.model.generate(
inputs["pixel_values"].to(self.device ) , decoder_input_ids=inputs["decoder_input_ids"].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=SCREAMING_SNAKE_CASE , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=SCREAMING_SNAKE_CASE , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=SCREAMING_SNAKE_CASE , ).sequences
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
UpperCamelCase = self.pre_processor.batch_decode(SCREAMING_SNAKE_CASE )[0]
UpperCamelCase = sequence.replace(self.pre_processor.tokenizer.eos_token , "" )
UpperCamelCase = sequence.replace(self.pre_processor.tokenizer.pad_token , "" )
UpperCamelCase = re.sub(R"<.*?>" , "" , SCREAMING_SNAKE_CASE , count=1 ).strip() # remove first task start token
UpperCamelCase = self.pre_processor.tokenajson(SCREAMING_SNAKE_CASE )
return sequence["answer"]
| 414
| 1
|
"""simple docstring"""
import operator as op
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : int = []
UpperCAmelCase__ : Any = lambda __UpperCamelCase , __UpperCamelCase : int(x / y ) # noqa: E731 integer division operation
UpperCAmelCase__ : List[Any] = {
"""^""": op.pow,
"""*""": op.mul,
"""/""": div,
"""+""": op.add,
"""-""": op.sub,
} # operators & their respective operation
# print table header
print("""Symbol""".center(8 ) , """Action""".center(12 ) , """Stack""" , sep=""" | """ )
print("""-""" * (30 + len(__UpperCamelCase )) )
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(__UpperCamelCase ) # append x to stack
# output in tabular format
print(x.rjust(8 ) , ("""push(""" + x + """)""").ljust(12 ) , """,""".join(__UpperCamelCase ) , sep=""" | """ )
else:
UpperCAmelCase__ : int = stack.pop() # pop stack
# output in tabular format
print("""""".rjust(8 ) , ("""pop(""" + b + """)""").ljust(12 ) , """,""".join(__UpperCamelCase ) , sep=""" | """ )
UpperCAmelCase__ : Optional[Any] = stack.pop() # pop stack
# output in tabular format
print("""""".rjust(8 ) , ("""pop(""" + a + """)""").ljust(12 ) , """,""".join(__UpperCamelCase ) , sep=""" | """ )
stack.append(
str(opr[x](int(__UpperCamelCase ) , int(__UpperCamelCase ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ) , ("""push(""" + a + x + b + """)""").ljust(12 ) , """,""".join(__UpperCamelCase ) , sep=""" | """ , )
return int(stack[0] )
if __name__ == "__main__":
__UpperCAmelCase = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
print('\n\tResult = ', solve(Postfix))
| 65
|
"""simple docstring"""
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class UpperCamelCase :
def __init__( self : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> Optional[Any]:
if dst_width < 0 or dst_height < 0:
raise ValueError("""Destination width/height should be > 0""" )
_a : int = img
_a : Optional[int] = img.shape[1]
_a : List[Any] = img.shape[0]
_a : Dict = dst_width
_a : Optional[int] = dst_height
_a : str = self.src_w / self.dst_w
_a : Union[str, Any] = self.src_h / self.dst_h
_a : Tuple = (
np.ones((self.dst_h, self.dst_w, 3) , np.uinta ) * 255
)
def _lowercase ( self : Optional[Any] ) -> str:
for i in range(self.dst_h ):
for j in range(self.dst_w ):
_a : List[Any] = self.img[self.get_y(UpperCAmelCase__ )][self.get_x(UpperCAmelCase__ )]
def _lowercase ( self : Any , UpperCAmelCase__ : int ) -> int:
return int(self.ratio_x * x )
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : int ) -> int:
return int(self.ratio_y * y )
if __name__ == "__main__":
_snake_case , _snake_case = 800, 600
_snake_case = imread('image_data/lena.jpg', 1)
_snake_case = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output
)
waitKey(0)
destroyAllWindows()
| 389
| 0
|
__UpperCAmelCase = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
__UpperCAmelCase = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
__UpperCAmelCase = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 712
|
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
__UpperCAmelCase = 4
__UpperCAmelCase = 3
class lowercase__( snake_case__ ):
'''simple docstring'''
pass
def _lowerCamelCase ( A_ : List[str] ) -> Dict:
'''simple docstring'''
for shard in shards:
for i in range(A_ ):
yield {"i": i, "shard": shard}
def _lowerCamelCase ( ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ : List[Any] =int(os.environ["RANK"] )
UpperCamelCase__ : Optional[Any] =int(os.environ["WORLD_SIZE"] )
UpperCamelCase__ : Any =ArgumentParser()
parser.add_argument("--streaming" , type=A_ )
parser.add_argument("--local_rank" , type=A_ )
parser.add_argument("--num_workers" , type=A_ , default=0 )
UpperCamelCase__ : List[Any] =parser.parse_args()
UpperCamelCase__ : Union[str, Any] =args.streaming
UpperCamelCase__ : Optional[int] =args.num_workers
UpperCamelCase__ : Optional[Any] ={"shards": [f'''shard_{shard_idx}''' for shard_idx in range(A_ )]}
UpperCamelCase__ : Union[str, Any] =IterableDataset.from_generator(A_ , gen_kwargs=A_ )
if not streaming:
UpperCamelCase__ : Optional[Any] =Dataset.from_list(list(A_ ) )
UpperCamelCase__ : int =split_dataset_by_node(A_ , rank=A_ , world_size=A_ )
UpperCamelCase__ : Any =torch.utils.data.DataLoader(A_ , num_workers=A_ )
UpperCamelCase__ : List[Any] =NUM_SHARDS * NUM_ITEMS_PER_SHARD
UpperCamelCase__ : Union[str, Any] =full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
UpperCamelCase__ : Optional[int] =sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(f'''local_size {local_size} != expected_local_size {expected_local_size}''' )
if __name__ == "__main__":
main()
| 582
| 0
|
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
__a = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
__a = 12_80_22
__a = 12_80_28
@require_sentencepiece
class __SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
A : Dict = MaMaaaTokenizer
A : Dict = False
A : Union[str, Any] = False
A : Union[str, Any] = True
def __lowerCamelCase ( self ):
super().setUp()
lowercase : Any = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
lowercase : Optional[int] = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
lowercase : str = Path(self.tmpdirname )
save_json(SCREAMING_SNAKE_CASE__ , save_dir / VOCAB_FILES_NAMES['''vocab_file'''] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(SCREAMING_SNAKE_CASE__ , save_dir / VOCAB_FILES_NAMES['''spm_file'''] )
lowercase : Union[str, Any] = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCamelCase ( self , **SCREAMING_SNAKE_CASE__ ):
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
return (
"This is a test",
"This is a test",
)
def __lowerCamelCase ( self ):
lowercase : str = '''</s>'''
lowercase : str = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase : List[str] = self.get_tokenizer()
lowercase : Dict = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''</s>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''<s>''' )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip('''Skip this test while all models are still to be uploaded.''' )
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
lowercase : Optional[int] = self.get_tokenizer()
lowercase : List[Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [2, 3, 4, 5, 6] , )
lowercase : Union[str, Any] = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
lowercase : Union[str, Any] = tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , '''This is a test''' )
@slow
def __lowerCamelCase ( self ):
# fmt: off
lowercase : str = {'''input_ids''': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE__ , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , )
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
A : Optional[Any] = 'facebook/m2m100_418M'
A : Union[str, Any] = [
'In my opinion, there are two levels of response from the French government.',
'NSA Affair Emphasizes Complete Lack of Debate on Intelligence',
]
A : Any = [
'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement',
]
# fmt: off
A : str = [EN_CODE, 593, 1949, 11_5781, 4, 7_1586, 4234, 6_0633, 12_6233, 432, 12_3808, 1_5592, 1197, 11_7132, 12_0618, 5, 2]
@classmethod
def __lowerCamelCase ( cls ):
lowercase : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en''' , tgt_lang='''fr''' )
lowercase : Union[str, Any] = 1
return cls
def __lowerCamelCase ( self ):
self.assertEqual(self.tokenizer.get_lang_id('''ar''' ) , 128006 )
self.assertEqual(self.tokenizer.get_lang_id('''en''' ) , 128022 )
self.assertEqual(self.tokenizer.get_lang_id('''ro''' ) , 128076 )
self.assertEqual(self.tokenizer.get_lang_id('''mr''' ) , 128063 )
def __lowerCamelCase ( self ):
lowercase : List[Any] = self.tokenizer.get_vocab()
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.tokenizer.vocab_size )
self.assertEqual(vocab['''<unk>'''] , 3 )
self.assertIn(self.tokenizer.get_lang_token('''en''' ) , SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase : Dict = '''en'''
lowercase : List[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
self.assertIn(SCREAMING_SNAKE_CASE__ , self.tokenizer.all_special_ids )
# fmt: off
lowercase : Optional[Any] = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
# fmt: on
lowercase : Any = self.tokenizer.decode(SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertNotIn(self.tokenizer.eos_token , SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase : List[str] = tempfile.mkdtemp()
lowercase : Optional[int] = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = MaMaaaTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertDictEqual(new_tok.lang_token_to_id , SCREAMING_SNAKE_CASE__ )
@require_torch
def __lowerCamelCase ( self ):
lowercase : Optional[int] = '''en'''
lowercase : Tuple = '''fr'''
lowercase : Optional[Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' )
lowercase : Any = shift_tokens_right(
batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
lowercase : Union[str, Any] = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def __lowerCamelCase ( self ):
lowercase : List[str] = '''mr'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
lowercase : int = '''zh'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def __lowerCamelCase ( self ):
lowercase : Optional[Any] = '''mr'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
lowercase : Optional[int] = '''zh'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def __lowerCamelCase ( self ):
lowercase : List[str] = self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''' )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , {
# en_XX, A, test, EOS
'''input_ids''': [[128022, 58, 4183, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 128006,
} , )
| 319
|
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__a = logging.get_logger(__name__) # pylint: disable=invalid-name
__a = '''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase=8 ) ->List[str]:
"""simple docstring"""
lowercase : Tuple = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
lowercase : Optional[Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def __lowercase ( _UpperCamelCase, _UpperCamelCase=512, _UpperCamelCase=512 ) ->Optional[Any]:
"""simple docstring"""
lowercase : Union[str, Any] = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1 )
lowercase : List[Any] = np.array(pil_image.convert('''RGB''' ) )
lowercase : Optional[Any] = arr.astype(np.floataa ) / 1_2_7.5 - 1
lowercase : Tuple = np.transpose(_UpperCamelCase, [2, 0, 1] )
lowercase : Optional[Any] = torch.from_numpy(_UpperCamelCase ).unsqueeze(0 )
return image
class __SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ):
super().__init__()
self.register_modules(
unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , movq=SCREAMING_SNAKE_CASE__ , )
lowercase : Optional[int] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
# get the original timestep using init_timestep
lowercase : Optional[Any] = min(int(num_inference_steps * strength ) , SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = max(num_inference_steps - init_timestep , 0 )
lowercase : Any = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ):
if not isinstance(SCREAMING_SNAKE_CASE__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(SCREAMING_SNAKE_CASE__ )}""" )
lowercase : Tuple = image.to(device=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = batch_size * num_images_per_prompt
if image.shape[1] == 4:
lowercase : int = image
else:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and len(SCREAMING_SNAKE_CASE__ ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(SCREAMING_SNAKE_CASE__ )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : List[Any] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(SCREAMING_SNAKE_CASE__ )
]
lowercase : Dict = torch.cat(SCREAMING_SNAKE_CASE__ , dim=0 )
else:
lowercase : int = self.movq.encode(SCREAMING_SNAKE_CASE__ ).latent_dist.sample(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = self.movq.config.scaling_factor * init_latents
lowercase : Dict = torch.cat([init_latents] , dim=0 )
lowercase : List[str] = init_latents.shape
lowercase : List[str] = randn_tensor(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ )
# get latents
lowercase : Optional[int] = self.scheduler.add_noise(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = init_latents
return latents
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
lowercase : Optional[int] = torch.device(f"""cuda:{gpu_id}""" )
lowercase : List[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__=0 ):
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
lowercase : Optional[Any] = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=SCREAMING_SNAKE_CASE__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase : Any = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase , lowercase : Union[str, Any] = cpu_offload_with_hook(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , prev_module_hook=SCREAMING_SNAKE_CASE__ )
# We'll offload the last model manually.
lowercase : Optional[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __lowerCamelCase ( self ):
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(SCREAMING_SNAKE_CASE__ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(SCREAMING_SNAKE_CASE__ )
def __call__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 512 , SCREAMING_SNAKE_CASE__ = 512 , SCREAMING_SNAKE_CASE__ = 100 , SCREAMING_SNAKE_CASE__ = 4.0 , SCREAMING_SNAKE_CASE__ = 0.3 , SCREAMING_SNAKE_CASE__ = 1 , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = "pil" , SCREAMING_SNAKE_CASE__ = True , ):
lowercase : Tuple = self._execution_device
lowercase : Any = guidance_scale > 1.0
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : int = torch.cat(SCREAMING_SNAKE_CASE__ , dim=0 )
lowercase : List[Any] = image_embeds.shape[0]
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : str = torch.cat(SCREAMING_SNAKE_CASE__ , dim=0 )
if do_classifier_free_guidance:
lowercase : Optional[Any] = image_embeds.repeat_interleave(SCREAMING_SNAKE_CASE__ , dim=0 )
lowercase : List[str] = negative_image_embeds.repeat_interleave(SCREAMING_SNAKE_CASE__ , dim=0 )
lowercase : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=SCREAMING_SNAKE_CASE__ )
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Dict = [image]
if not all(isinstance(SCREAMING_SNAKE_CASE__ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"""Input is in incorrect format: {[type(SCREAMING_SNAKE_CASE__ ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
lowercase : int = torch.cat([prepare_image(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for i in image] , dim=0 )
lowercase : Any = image.to(dtype=image_embeds.dtype , device=SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = self.movq.encode(SCREAMING_SNAKE_CASE__ )['''latents''']
lowercase : Optional[Any] = latents.repeat_interleave(SCREAMING_SNAKE_CASE__ , dim=0 )
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ )
lowercase , lowercase : Any = self.get_timesteps(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Dict = timesteps[:1].repeat(batch_size * num_images_per_prompt )
lowercase , lowercase : Optional[int] = downscale_height_and_width(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.movq_scale_factor )
lowercase : Any = self.prepare_latents(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , image_embeds.dtype , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE__ ) ):
# expand the latents if we are doing classifier free guidance
lowercase : str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase : Dict = {'''image_embeds''': image_embeds}
lowercase : List[Any] = self.unet(
sample=SCREAMING_SNAKE_CASE__ , timestep=SCREAMING_SNAKE_CASE__ , encoder_hidden_states=SCREAMING_SNAKE_CASE__ , added_cond_kwargs=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ , )[0]
if do_classifier_free_guidance:
lowercase , lowercase : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 )
lowercase , lowercase : str = noise_pred.chunk(2 )
lowercase , lowercase : Dict = variance_pred.chunk(2 )
lowercase : Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase : List[Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase , lowercase : Dict = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase : Optional[int] = self.scheduler.step(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , )[0]
# post-processing
lowercase : Dict = self.movq.decode(SCREAMING_SNAKE_CASE__ , force_not_quantize=SCREAMING_SNAKE_CASE__ )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
lowercase : List[str] = image * 0.5 + 0.5
lowercase : Any = image.clamp(0 , 1 )
lowercase : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase : List[Any] = self.numpy_to_pil(SCREAMING_SNAKE_CASE__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE__ )
| 319
| 1
|
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
)
__snake_case = None
__snake_case = {
"7B": 11_008,
"13B": 13_824,
"30B": 17_920,
"65B": 22_016,
"70B": 28_672,
}
__snake_case = {
"7B": 1,
"7Bf": 1,
"13B": 2,
"13Bf": 2,
"30B": 4,
"65B": 8,
"70B": 8,
"70Bf": 8,
}
def _lowercase ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int=1 , SCREAMING_SNAKE_CASE_ : Dict=256 ):
"""simple docstring"""
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
def _lowercase ( SCREAMING_SNAKE_CASE_ : Any ):
"""simple docstring"""
with open(__snake_case , """r""" ) as f:
return json.load(__snake_case )
def _lowercase ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
"""simple docstring"""
with open(__snake_case , """w""" ) as f:
json.dump(__snake_case , __snake_case )
def _lowercase ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[Any]=True ):
"""simple docstring"""
os.makedirs(__snake_case , exist_ok=__snake_case )
UpperCamelCase = os.path.join(__snake_case , """tmp""" )
os.makedirs(__snake_case , exist_ok=__snake_case )
UpperCamelCase = read_json(os.path.join(__snake_case , """params.json""" ) )
UpperCamelCase = NUM_SHARDS[model_size]
UpperCamelCase = params["""n_layers"""]
UpperCamelCase = params["""n_heads"""]
UpperCamelCase = n_heads // num_shards
UpperCamelCase = params["""dim"""]
UpperCamelCase = dim // n_heads
UpperCamelCase = 1_0000.0
UpperCamelCase = 1.0 / (base ** (torch.arange(0 , __snake_case , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
UpperCamelCase = params["""n_kv_heads"""] # for GQA / MQA
UpperCamelCase = n_heads_per_shard // num_key_value_heads
UpperCamelCase = dim // num_key_value_heads
else: # compatibility with other checkpoints
UpperCamelCase = n_heads
UpperCamelCase = n_heads_per_shard
UpperCamelCase = dim
# permute for sliced rotary
def permute(SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict=n_heads , SCREAMING_SNAKE_CASE_ : Optional[Any]=dim , SCREAMING_SNAKE_CASE_ : int=dim ):
return w.view(__snake_case , dima // n_heads // 2 , 2 , __snake_case ).transpose(1 , 2 ).reshape(__snake_case , __snake_case )
print(f'Fetching all parameters from the checkpoint at {input_base_path}.' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
UpperCamelCase = torch.load(os.path.join(__snake_case , """consolidated.00.pth""" ) , map_location="""cpu""" )
else:
# Sharded
UpperCamelCase = [
torch.load(os.path.join(__snake_case , f'consolidated.{i:02d}.pth' ) , map_location="""cpu""" )
for i in range(__snake_case )
]
UpperCamelCase = 0
UpperCamelCase = {"""weight_map""": {}}
for layer_i in range(__snake_case ):
UpperCamelCase = f'pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
UpperCamelCase = {
f'model.layers.{layer_i}.self_attn.q_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wq.weight'] ),
f'model.layers.{layer_i}.self_attn.k_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wk.weight'] ),
f'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[f'layers.{layer_i}.attention.wv.weight'],
f'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[f'layers.{layer_i}.attention.wo.weight'],
f'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w1.weight'],
f'model.layers.{layer_i}.mlp.down_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w2.weight'],
f'model.layers.{layer_i}.mlp.up_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w3.weight'],
f'model.layers.{layer_i}.input_layernorm.weight': loaded[f'layers.{layer_i}.attention_norm.weight'],
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[f'layers.{layer_i}.ffn_norm.weight'],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
UpperCamelCase = {
f'model.layers.{layer_i}.input_layernorm.weight': loaded[0][
f'layers.{layer_i}.attention_norm.weight'
].clone(),
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][
f'layers.{layer_i}.ffn_norm.weight'
].clone(),
}
UpperCamelCase = permute(
torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wq.weight'].view(__snake_case , __snake_case , __snake_case )
for i in range(__snake_case )
] , dim=0 , ).reshape(__snake_case , __snake_case ) )
UpperCamelCase = permute(
torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wk.weight'].view(
__snake_case , __snake_case , __snake_case )
for i in range(__snake_case )
] , dim=0 , ).reshape(__snake_case , __snake_case ) , __snake_case , __snake_case , __snake_case , )
UpperCamelCase = torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wv.weight'].view(
__snake_case , __snake_case , __snake_case )
for i in range(__snake_case )
] , dim=0 , ).reshape(__snake_case , __snake_case )
UpperCamelCase = torch.cat(
[loaded[i][f'layers.{layer_i}.attention.wo.weight'] for i in range(__snake_case )] , dim=1 )
UpperCamelCase = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w1.weight'] for i in range(__snake_case )] , dim=0 )
UpperCamelCase = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w2.weight'] for i in range(__snake_case )] , dim=1 )
UpperCamelCase = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w3.weight'] for i in range(__snake_case )] , dim=0 )
UpperCamelCase = inv_freq
for k, v in state_dict.items():
UpperCamelCase = filename
param_count += v.numel()
torch.save(__snake_case , os.path.join(__snake_case , __snake_case ) )
UpperCamelCase = f'pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
UpperCamelCase = {
"""model.embed_tokens.weight""": loaded["""tok_embeddings.weight"""],
"""model.norm.weight""": loaded["""norm.weight"""],
"""lm_head.weight""": loaded["""output.weight"""],
}
else:
UpperCamelCase = {
"""model.norm.weight""": loaded[0]["""norm.weight"""],
"""model.embed_tokens.weight""": torch.cat(
[loaded[i]["""tok_embeddings.weight"""] for i in range(__snake_case )] , dim=1 ),
"""lm_head.weight""": torch.cat([loaded[i]["""output.weight"""] for i in range(__snake_case )] , dim=0 ),
}
for k, v in state_dict.items():
UpperCamelCase = filename
param_count += v.numel()
torch.save(__snake_case , os.path.join(__snake_case , __snake_case ) )
# Write configs
UpperCamelCase = {"""total_size""": param_count * 2}
write_json(__snake_case , os.path.join(__snake_case , """pytorch_model.bin.index.json""" ) )
UpperCamelCase = params["""ffn_dim_multiplier"""] if """ffn_dim_multiplier""" in params else 1
UpperCamelCase = params["""multiple_of"""] if """multiple_of""" in params else 256
UpperCamelCase = LlamaConfig(
hidden_size=__snake_case , intermediate_size=compute_intermediate_size(__snake_case , __snake_case , __snake_case ) , num_attention_heads=params["""n_heads"""] , num_hidden_layers=params["""n_layers"""] , rms_norm_eps=params["""norm_eps"""] , num_key_value_heads=__snake_case , )
config.save_pretrained(__snake_case )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("""Loading the checkpoint in a Llama model.""" )
UpperCamelCase = LlamaForCausalLM.from_pretrained(__snake_case , torch_dtype=torch.floataa , low_cpu_mem_usage=__snake_case )
# Avoid saving this as part of the config.
del model.config._name_or_path
print("""Saving in the Transformers format.""" )
model.save_pretrained(__snake_case , safe_serialization=__snake_case )
shutil.rmtree(__snake_case )
def _lowercase ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(f'Saving a {tokenizer_class.__name__} to {tokenizer_path}.' )
UpperCamelCase = tokenizer_class(__snake_case )
tokenizer.save_pretrained(__snake_case )
def _lowercase ( ):
"""simple docstring"""
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
"""--input_dir""" , help="""Location of LLaMA weights, which contains tokenizer.model and model folders""" , )
parser.add_argument(
"""--model_size""" , choices=["""7B""", """7Bf""", """13B""", """13Bf""", """30B""", """65B""", """70B""", """70Bf""", """tokenizer_only"""] , )
parser.add_argument(
"""--output_dir""" , help="""Location to write HF model and tokenizer""" , )
parser.add_argument("""--safe_serialization""" , type=__snake_case , help="""Whether or not to save using `safetensors`.""" )
UpperCamelCase = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
UpperCamelCase = os.path.join(args.input_dir , """tokenizer.model""" )
write_tokenizer(args.output_dir , __snake_case )
if __name__ == "__main__":
main()
| 712
|
import math
def _lowercase ( SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
UpperCamelCase = [True] * n
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = True
for i in range(3 , int(n**0.5 + 1 ) , 2 ):
UpperCamelCase = i * 2
while index < n:
UpperCamelCase = False
UpperCamelCase = index + i
UpperCamelCase = [2]
for i in range(3 , SCREAMING_SNAKE_CASE_ , 2 ):
if is_prime[i]:
primes.append(SCREAMING_SNAKE_CASE_ )
return primes
def _lowercase ( SCREAMING_SNAKE_CASE_ : int = 999_966_663_333 ):
"""simple docstring"""
UpperCamelCase = math.floor(math.sqrt(SCREAMING_SNAKE_CASE_ ) ) + 100
UpperCamelCase = prime_sieve(SCREAMING_SNAKE_CASE_ )
UpperCamelCase = 0
UpperCamelCase = 0
UpperCamelCase = primes[prime_index]
while (last_prime**2) <= limit:
UpperCamelCase = primes[prime_index + 1]
UpperCamelCase = last_prime**2
UpperCamelCase = next_prime**2
# Get numbers divisible by lps(current)
UpperCamelCase = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
UpperCamelCase = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
UpperCamelCase = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
UpperCamelCase = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
| 181
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase_ : Optional[int] = 'platform'
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , ):
"""simple docstring"""
if attention_mask is None:
_SCREAMING_SNAKE_CASE : Dict = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
_SCREAMING_SNAKE_CASE : int = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
_SCREAMING_SNAKE_CASE : Dict = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_SCREAMING_SNAKE_CASE : Any = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_SCREAMING_SNAKE_CASE : List[Any] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class lowercase__ :
'''simple docstring'''
def __init__( self , __snake_case , __snake_case=13 , __snake_case=7 , __snake_case=True , __snake_case=False , __snake_case=99 , __snake_case=16 , __snake_case=2 , __snake_case=4 , __snake_case=4 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=32 , __snake_case=2 , __snake_case=1 , __snake_case=0 , __snake_case=0.02 , ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = parent
_SCREAMING_SNAKE_CASE : Any = batch_size
_SCREAMING_SNAKE_CASE : List[str] = seq_length
_SCREAMING_SNAKE_CASE : Tuple = is_training
_SCREAMING_SNAKE_CASE : Any = use_labels
_SCREAMING_SNAKE_CASE : Tuple = vocab_size
_SCREAMING_SNAKE_CASE : Tuple = hidden_size
_SCREAMING_SNAKE_CASE : Any = num_hidden_layers
_SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
_SCREAMING_SNAKE_CASE : Any = intermediate_size
_SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
_SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : List[Any] = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : Dict = max_position_embeddings
_SCREAMING_SNAKE_CASE : List[str] = eos_token_id
_SCREAMING_SNAKE_CASE : Optional[int] = pad_token_id
_SCREAMING_SNAKE_CASE : Optional[Any] = bos_token_id
_SCREAMING_SNAKE_CASE : List[str] = initializer_range
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Any = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
_SCREAMING_SNAKE_CASE : Union[str, Any] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
_SCREAMING_SNAKE_CASE : Tuple = shift_tokens_right(__snake_case , 1 , 2 )
_SCREAMING_SNAKE_CASE : int = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=__snake_case , )
_SCREAMING_SNAKE_CASE : Any = prepare_blenderbot_inputs_dict(__snake_case , __snake_case , __snake_case )
return config, inputs_dict
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Any = self.prepare_config_and_inputs()
return config, inputs_dict
def UpperCAmelCase_ ( self , __snake_case , __snake_case , __snake_case ):
_SCREAMING_SNAKE_CASE : Optional[Any] = 20
_SCREAMING_SNAKE_CASE : Any = model_class_name(__snake_case )
_SCREAMING_SNAKE_CASE : Dict = model.encode(inputs_dict["""input_ids"""] )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_SCREAMING_SNAKE_CASE : List[Any] = model.init_cache(decoder_input_ids.shape[0] , __snake_case , __snake_case )
_SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
_SCREAMING_SNAKE_CASE : Optional[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_SCREAMING_SNAKE_CASE : str = model.decode(
decoder_input_ids[:, :-1] , __snake_case , decoder_attention_mask=__snake_case , past_key_values=__snake_case , decoder_position_ids=__snake_case , )
_SCREAMING_SNAKE_CASE : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
_SCREAMING_SNAKE_CASE : List[str] = model.decode(
decoder_input_ids[:, -1:] , __snake_case , decoder_attention_mask=__snake_case , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__snake_case , )
_SCREAMING_SNAKE_CASE : int = model.decode(__snake_case , __snake_case )
_SCREAMING_SNAKE_CASE : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def UpperCAmelCase_ ( self , __snake_case , __snake_case , __snake_case ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = 20
_SCREAMING_SNAKE_CASE : Tuple = model_class_name(__snake_case )
_SCREAMING_SNAKE_CASE : Any = model.encode(inputs_dict["""input_ids"""] )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Tuple = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_SCREAMING_SNAKE_CASE : Any = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_SCREAMING_SNAKE_CASE : Tuple = model.init_cache(decoder_input_ids.shape[0] , __snake_case , __snake_case )
_SCREAMING_SNAKE_CASE : Dict = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_SCREAMING_SNAKE_CASE : Optional[int] = model.decode(
decoder_input_ids[:, :-1] , __snake_case , decoder_attention_mask=__snake_case , past_key_values=__snake_case , decoder_position_ids=__snake_case , )
_SCREAMING_SNAKE_CASE : Dict = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
_SCREAMING_SNAKE_CASE : Tuple = model.decode(
decoder_input_ids[:, -1:] , __snake_case , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__snake_case , decoder_position_ids=__snake_case , )
_SCREAMING_SNAKE_CASE : Optional[Any] = model.decode(__snake_case , __snake_case , decoder_attention_mask=__snake_case )
_SCREAMING_SNAKE_CASE : Optional[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
A_ : str = 99
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Optional[int] = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
_SCREAMING_SNAKE_CASE : List[str] = input_ids.shape[0]
_SCREAMING_SNAKE_CASE : Tuple = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = self._get_config_and_data()
_SCREAMING_SNAKE_CASE : Tuple = FlaxBlenderbotSmallForConditionalGeneration(__snake_case )
_SCREAMING_SNAKE_CASE : int = lm_model(input_ids=__snake_case )
_SCREAMING_SNAKE_CASE : Any = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , __snake_case )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Optional[Any] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
_SCREAMING_SNAKE_CASE : Tuple = FlaxBlenderbotSmallForConditionalGeneration(__snake_case )
_SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
_SCREAMING_SNAKE_CASE : str = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
_SCREAMING_SNAKE_CASE : Any = lm_model(input_ids=__snake_case , decoder_input_ids=__snake_case )
_SCREAMING_SNAKE_CASE : str = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , __snake_case )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : List[Any] = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
_SCREAMING_SNAKE_CASE : Any = shift_tokens_right(__snake_case , 1 , 2 )
_SCREAMING_SNAKE_CASE : Union[str, Any] = np.equal(__snake_case , 1 ).astype(np.floataa ).sum()
_SCREAMING_SNAKE_CASE : Any = np.equal(__snake_case , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(__snake_case , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class lowercase__ ( _snake_case , unittest.TestCase , _snake_case ):
'''simple docstring'''
A_ : Dict = True
A_ : List[Any] = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
A_ : List[str] = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxBlenderbotSmallModelTester(self )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__snake_case , __snake_case , __snake_case )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__snake_case , __snake_case , __snake_case )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_SCREAMING_SNAKE_CASE : Optional[Any] = self._prepare_for_class(__snake_case , __snake_case )
_SCREAMING_SNAKE_CASE : int = model_class(__snake_case )
@jax.jit
def encode_jitted(__snake_case , __snake_case=None , **__snake_case ):
return model.encode(input_ids=__snake_case , attention_mask=__snake_case )
with self.subTest("""JIT Enabled""" ):
_SCREAMING_SNAKE_CASE : List[str] = encode_jitted(**__snake_case ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_SCREAMING_SNAKE_CASE : Any = encode_jitted(**__snake_case ).to_tuple()
self.assertEqual(len(__snake_case ) , len(__snake_case ) )
for jitted_output, output in zip(__snake_case , __snake_case ):
self.assertEqual(jitted_output.shape , output.shape )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_SCREAMING_SNAKE_CASE : str = model_class(__snake_case )
_SCREAMING_SNAKE_CASE : Any = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
_SCREAMING_SNAKE_CASE : Any = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(__snake_case , __snake_case , __snake_case ):
return model.decode(
decoder_input_ids=__snake_case , decoder_attention_mask=__snake_case , encoder_outputs=__snake_case , )
with self.subTest("""JIT Enabled""" ):
_SCREAMING_SNAKE_CASE : Dict = decode_jitted(**__snake_case ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_SCREAMING_SNAKE_CASE : Tuple = decode_jitted(**__snake_case ).to_tuple()
self.assertEqual(len(__snake_case ) , len(__snake_case ) )
for jitted_output, output in zip(__snake_case , __snake_case ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def UpperCAmelCase_ ( self ):
for model_class_name in self.all_model_classes:
_SCREAMING_SNAKE_CASE : Optional[Any] = model_class_name.from_pretrained("""facebook/blenderbot_small-90M""" )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
_SCREAMING_SNAKE_CASE : List[str] = np.ones((1, 1) ) * model.config.eos_token_id
_SCREAMING_SNAKE_CASE : Any = model(__snake_case )
self.assertIsNotNone(__snake_case )
| 533
|
'''simple docstring'''
import math
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
_SCREAMING_SNAKE_CASE : List[str] = range(3 , int(math.sqrt(SCREAMING_SNAKE_CASE__ ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=1 , **SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Dict = factor * value
_SCREAMING_SNAKE_CASE : Optional[Any] = value
while not is_prime(SCREAMING_SNAKE_CASE__ ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **SCREAMING_SNAKE_CASE__ )
return value
| 533
| 1
|
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( _lowercase : str , _lowercase : int ) ->str:
'''simple docstring'''
a : list[list[str]] = [[] for _ in range(_lowercase )]
a : Dict = key - 1
if key <= 0:
raise ValueError("Height of grid can't be 0 or negative" )
if key == 1 or len(_lowercase ) <= key:
return input_string
for position, character in enumerate(_lowercase ):
a : str = position % (lowest * 2) # puts it in bounds
a : Optional[Any] = min(_lowercase , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append(_lowercase )
a : Optional[int] = ["".join(_lowercase ) for row in temp_grid]
a : str = "".join(_lowercase )
return output_string
def _SCREAMING_SNAKE_CASE ( _lowercase : str , _lowercase : int ) ->str:
'''simple docstring'''
a : str = []
a : Dict = key - 1
if key <= 0:
raise ValueError("Height of grid can't be 0 or negative" )
if key == 1:
return input_string
a : list[list[str]] = [[] for _ in range(_lowercase )] # generates template
for position in range(len(_lowercase ) ):
a : Any = position % (lowest * 2) # puts it in bounds
a : Any = min(_lowercase , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append("*" )
a : List[str] = 0
for row in temp_grid: # fills in the characters
a : Union[str, Any] = input_string[counter : counter + len(_lowercase )]
grid.append(list(_lowercase ) )
counter += len(_lowercase )
a : Union[str, Any] = "" # reads as zigzag
for position in range(len(_lowercase ) ):
a : Tuple = position % (lowest * 2) # puts it in bounds
a : str = min(_lowercase , lowest * 2 - num ) # creates zigzag pattern
output_string += grid[num][0]
grid[num].pop(0 )
return output_string
def _SCREAMING_SNAKE_CASE ( _lowercase : str ) ->dict[int, str]:
'''simple docstring'''
a : Optional[Any] = {}
for key_guess in range(1 , len(_lowercase ) ): # tries every key
a : int = decrypt(_lowercase , _lowercase )
return results
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
|
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def _SCREAMING_SNAKE_CASE ( _lowercase : List[str] ) ->int:
'''simple docstring'''
a : int = {}
a : Union[str, Any] = tokenizer(example["content"] , truncation=_lowercase )["input_ids"]
a : Any = len(example["content"] ) / len(output["input_ids"] )
return output
a : int = HfArgumentParser(PretokenizationArguments)
a : Optional[int] = parser.parse_args()
if args.num_workers is None:
a : Tuple = multiprocessing.cpu_count()
a : Optional[int] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
a : Dict = time.time()
a : Tuple = load_dataset(args.dataset_name, split='''train''')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
a : Dict = time.time()
a : Tuple = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
a : Tuple = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 31
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class A_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , A_ , A_=7 , A_=3 , A_=10 , A_=18 , A_=30 , A_=4_00 , A_=True , A_=None , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , A_=None , ):
_UpperCamelCase = size if size is not None else {"""shortest_edge""": 18}
_UpperCamelCase = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = num_channels
_UpperCamelCase = num_frames
_UpperCamelCase = image_size
_UpperCamelCase = min_resolution
_UpperCamelCase = max_resolution
_UpperCamelCase = do_resize
_UpperCamelCase = size
_UpperCamelCase = do_normalize
_UpperCamelCase = image_mean
_UpperCamelCase = image_std
_UpperCamelCase = crop_size
def a ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class A_ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = VivitImageProcessor if is_vision_available() else None
def a ( self ):
_UpperCamelCase = VivitImageProcessingTester(self )
@property
def a ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def a ( self ):
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , "image_mean" ) )
self.assertTrue(hasattr(A_ , "image_std" ) )
self.assertTrue(hasattr(A_ , "do_normalize" ) )
self.assertTrue(hasattr(A_ , "do_resize" ) )
self.assertTrue(hasattr(A_ , "do_center_crop" ) )
self.assertTrue(hasattr(A_ , "size" ) )
def a ( self ):
_UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
_UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def a ( self ):
# Initialize image_processing
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
_UpperCamelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=A_ )
for video in video_inputs:
self.assertIsInstance(A_ , A_ )
self.assertIsInstance(video[0] , Image.Image )
# Test not batched input
_UpperCamelCase = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_UpperCamelCase = image_processing(A_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def a ( self ):
# Initialize image_processing
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCamelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for video in video_inputs:
self.assertIsInstance(A_ , A_ )
self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
_UpperCamelCase = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_UpperCamelCase = image_processing(A_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def a ( self ):
# Initialize image_processing
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCamelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for video in video_inputs:
self.assertIsInstance(A_ , A_ )
self.assertIsInstance(video[0] , torch.Tensor )
# Test not batched input
_UpperCamelCase = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
_UpperCamelCase = image_processing(A_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 138
|
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def _a ( lowerCamelCase ):
return x + 2
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = """x = 3"""
lowerCamelCase : Tuple = {}
lowerCamelCase : List[str] = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result == 3
self.assertDictEqual(__magic_name__ , {"""x""": 3} )
lowerCamelCase : Optional[int] = """x = y"""
lowerCamelCase : Tuple = {"""y""": 5}
lowerCamelCase : Tuple = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__magic_name__ , {"""x""": 5, """y""": 5} )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = """y = add_two(x)"""
lowerCamelCase : List[Any] = {"""x""": 3}
lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ )
assert result == 5
self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} )
# Won't work without the tool
with CaptureStdout() as out:
lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result is None
assert "tried to execute add_two" in out.out
def UpperCamelCase__ ( self ):
lowerCamelCase : int = """x = 3"""
lowerCamelCase : Dict = {}
lowerCamelCase : Tuple = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result == 3
self.assertDictEqual(__magic_name__ , {"""x""": 3} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[Any] = """test_dict = {'x': x, 'y': add_two(x)}"""
lowerCamelCase : Optional[int] = {"""x""": 3}
lowerCamelCase : Tuple = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ )
self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} )
self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = """x = 3\ny = 5"""
lowerCamelCase : Optional[int] = {}
lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = """text = f'This is x: {x}.'"""
lowerCamelCase : Optional[int] = {"""x""": 3}
lowerCamelCase : Optional[int] = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(__magic_name__ , {"""x""": 3, """text""": """This is x: 3."""} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Tuple = """if x <= 3:\n y = 2\nelse:\n y = 5"""
lowerCamelCase : Tuple = {"""x""": 3}
lowerCamelCase : int = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 2} )
lowerCamelCase : Tuple = {"""x""": 8}
lowerCamelCase : Dict = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__magic_name__ , {"""x""": 8, """y""": 5} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Dict = """test_list = [x, add_two(x)]"""
lowerCamelCase : List[Any] = {"""x""": 3}
lowerCamelCase : List[str] = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ )
self.assertListEqual(__magic_name__ , [3, 5] )
self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_list""": [3, 5]} )
def UpperCamelCase__ ( self ):
lowerCamelCase : str = """y = x"""
lowerCamelCase : List[Any] = {"""x""": 3}
lowerCamelCase : Any = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result == 3
self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 3} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = """test_list = [x, add_two(x)]\ntest_list[1]"""
lowerCamelCase : Any = {"""x""": 3}
lowerCamelCase : List[str] = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ )
assert result == 5
self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_list""": [3, 5]} )
lowerCamelCase : Any = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"""
lowerCamelCase : Dict = {"""x""": 3}
lowerCamelCase : Any = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ )
assert result == 5
self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} )
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = """x = 0\nfor i in range(3):\n x = i"""
lowerCamelCase : int = {}
lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {"""range""": range} , state=__magic_name__ )
assert result == 2
self.assertDictEqual(__magic_name__ , {"""x""": 2, """i""": 2} )
| 681
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
a__ : Dict ={"""configuration_swin""": ["""SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SwinConfig""", """SwinOnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[str] =[
"""SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwinForImageClassification""",
"""SwinForMaskedImageModeling""",
"""SwinModel""",
"""SwinPreTrainedModel""",
"""SwinBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Any =[
"""TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSwinForImageClassification""",
"""TFSwinForMaskedImageModeling""",
"""TFSwinModel""",
"""TFSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
a__ : List[Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 720
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=__lowerCamelCase )
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str =field(default="question-answering-extractive" , metadata={"include_in_asdict_even_if_is_default": True} )
SCREAMING_SNAKE_CASE_ : ClassVar[Features] =Features({"question": Value("string" ), "context": Value("string" )} )
SCREAMING_SNAKE_CASE_ : ClassVar[Features] =Features(
{
"answers": Sequence(
{
"text": Value("string" ),
"answer_start": Value("int32" ),
} )
} )
SCREAMING_SNAKE_CASE_ : str ="question"
SCREAMING_SNAKE_CASE_ : str ="context"
SCREAMING_SNAKE_CASE_ : str ="answers"
@property
def _lowerCamelCase ( self : List[Any] ):
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 434
| 0
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
A = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"""
def _UpperCamelCase ( ) -> int:
"""simple docstring"""
__UpperCAmelCase : int = _ask_options(
"In which compute environment are you running?" , ["This machine", "AWS (Amazon SageMaker)"] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
__UpperCAmelCase : str = get_sagemaker_input()
else:
__UpperCAmelCase : List[Any] = get_cluster_input()
return config
def _UpperCamelCase ( UpperCamelCase=None ) -> Tuple:
"""simple docstring"""
if subparsers is not None:
__UpperCAmelCase : Tuple = subparsers.add_parser("config" , description=UpperCamelCase )
else:
__UpperCAmelCase : int = argparse.ArgumentParser("Accelerate config command" , description=UpperCamelCase )
parser.add_argument(
"--config_file" , default=UpperCamelCase , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=UpperCamelCase )
return parser
def _UpperCamelCase ( UpperCamelCase ) -> Dict:
"""simple docstring"""
__UpperCAmelCase : int = get_user_input()
if args.config_file is not None:
__UpperCAmelCase : Tuple = args.config_file
else:
if not os.path.isdir(UpperCamelCase ):
os.makedirs(UpperCamelCase )
__UpperCAmelCase : Optional[int] = default_yaml_config_file
if config_file.endswith(".json" ):
config.to_json_file(UpperCamelCase )
else:
config.to_yaml_file(UpperCamelCase )
print(f"accelerate configuration saved at {config_file}" )
def _UpperCamelCase ( ) -> Optional[int]:
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = config_command_parser()
__UpperCAmelCase : Union[str, Any] = parser.parse_args()
config_command(UpperCamelCase )
if __name__ == "__main__":
main()
| 77
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_A = {"""configuration_unispeech""": ["""UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP""", """UniSpeechConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""UniSpeechForCTC""",
"""UniSpeechForPreTraining""",
"""UniSpeechForSequenceClassification""",
"""UniSpeechModel""",
"""UniSpeechPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 299
| 0
|
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
__A : Tuple = logging.get_logger(__name__)
__A : int = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
"self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
"self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
__A : str = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def lowercase ( UpperCamelCase : Tuple , UpperCamelCase : int , UpperCamelCase : Tuple , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int] ):
"""simple docstring"""
for attribute in key.split("." ):
A__ : Dict =getattr(UpperCamelCase , UpperCamelCase )
if weight_type is not None:
A__ : str =getattr(UpperCamelCase , UpperCamelCase ).shape
else:
A__ : Dict =hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
A__ : str =value
elif weight_type == "weight_g":
A__ : List[Any] =value
elif weight_type == "weight_v":
A__ : int =value
elif weight_type == "bias":
A__ : Optional[Any] =value
else:
A__ : List[str] =value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def lowercase ( UpperCamelCase : Optional[int] , UpperCamelCase : int ):
"""simple docstring"""
A__ : Dict =[]
A__ : Tuple =fairseq_model.state_dict()
A__ : List[Any] =hf_model.feature_extractor
for name, value in fairseq_dict.items():
A__ : Dict =False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , hf_model.config.feat_extract_norm == "group" , )
A__ : Any =True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
A__ : List[str] =True
if "*" in mapped_key:
A__ : int =name.split(UpperCamelCase )[0].split("." )[-2]
A__ : Optional[int] =mapped_key.replace("*" , UpperCamelCase )
if "weight_g" in name:
A__ : List[str] ="weight_g"
elif "weight_v" in name:
A__ : Union[str, Any] ="weight_v"
elif "bias" in name and "relative_attention_bias" not in name:
A__ : Tuple ="bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
A__ : Tuple ="weight"
else:
A__ : Optional[Any] =None
set_recursively(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
continue
if not is_used:
unused_weights.append(UpperCamelCase )
logger.warning(F'''Unused weights: {unused_weights}''' )
def lowercase ( UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any] , UpperCamelCase : str , UpperCamelCase : Dict , UpperCamelCase : List[Any] ):
"""simple docstring"""
A__ : Tuple =full_name.split("conv_layers." )[-1]
A__ : List[Any] =name.split("." )
A__ : Optional[Any] =int(items[0] )
A__ : Optional[Any] =int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
A__ : List[Any] =value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
A__ : List[str] =value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
A__ : Tuple =value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
A__ : Tuple =value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(UpperCamelCase )
@torch.no_grad()
def lowercase ( UpperCamelCase : Tuple , UpperCamelCase : Any , UpperCamelCase : Optional[Any]=None ):
"""simple docstring"""
# load the pre-trained checkpoints
A__ : Optional[int] =torch.load(UpperCamelCase )
A__ : Tuple =WavLMConfigOrig(checkpoint["cfg"] )
A__ : Dict =WavLMOrig(UpperCamelCase )
model.load_state_dict(checkpoint["model"] )
model.eval()
if config_path is not None:
A__ : Any =WavLMConfig.from_pretrained(UpperCamelCase )
else:
A__ : Union[str, Any] =WavLMConfig()
A__ : List[str] =WavLMModel(UpperCamelCase )
recursively_load_weights(UpperCamelCase , UpperCamelCase )
hf_wavlm.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
__A : Optional[int] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
__A : List[str] = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 595
|
"""simple docstring"""
from collections import defaultdict
def lowercase ( UpperCamelCase : int ):
"""simple docstring"""
A__ : Union[str, Any] =1
A__ : int =True
for v in tree[start]:
if v not in visited:
ret += dfs(UpperCamelCase )
if ret % 2 == 0:
cuts.append(UpperCamelCase )
return ret
def lowercase ( ):
"""simple docstring"""
dfs(1 )
if __name__ == "__main__":
__A , __A : List[str] = 10, 9
__A : Dict = defaultdict(list)
__A : dict[int, bool] = {}
__A : list[int] = []
__A : List[str] = 0
__A : str = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
| 595
| 1
|
'''simple docstring'''
import heapq
def a__ ( _SCREAMING_SNAKE_CASE : dict ) -> set[int]:
"""simple docstring"""
UpperCAmelCase_ : list[list] = []
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(_SCREAMING_SNAKE_CASE , [-1 * len(_SCREAMING_SNAKE_CASE ), (key, value)] )
# chosen_vertices = set of chosen vertices
UpperCAmelCase_ : Optional[int] = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
UpperCAmelCase_ : Tuple = heapq.heappop(_SCREAMING_SNAKE_CASE )[1][0]
chosen_vertices.add(_SCREAMING_SNAKE_CASE )
# Remove all arcs adjacent to argmax
for elem in queue:
# if v haven't adjacent node, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
UpperCAmelCase_ : Any = elem[1][1].index(_SCREAMING_SNAKE_CASE )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(_SCREAMING_SNAKE_CASE )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCamelCase = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 71
|
'''simple docstring'''
import heapq
def a__ ( _SCREAMING_SNAKE_CASE : dict ) -> set[int]:
"""simple docstring"""
UpperCAmelCase_ : list[list] = []
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(_SCREAMING_SNAKE_CASE , [-1 * len(_SCREAMING_SNAKE_CASE ), (key, value)] )
# chosen_vertices = set of chosen vertices
UpperCAmelCase_ : Optional[int] = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
UpperCAmelCase_ : Tuple = heapq.heappop(_SCREAMING_SNAKE_CASE )[1][0]
chosen_vertices.add(_SCREAMING_SNAKE_CASE )
# Remove all arcs adjacent to argmax
for elem in queue:
# if v haven't adjacent node, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
UpperCAmelCase_ : Any = elem[1][1].index(_SCREAMING_SNAKE_CASE )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(_SCREAMING_SNAKE_CASE )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCamelCase = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 71
| 1
|
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
a : List[str] = ''''''
a : int = ''''''
a : List[Any] = ''''''
a : Optional[int] = ''''''
def __UpperCAmelCase ( _UpperCAmelCase : str ) -> None:
# authorize twitter, initialize tweepy
__snake_case = tweepy.OAuthHandler(_UpperCAmelCase , _UpperCAmelCase )
auth.set_access_token(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = tweepy.API(_UpperCAmelCase )
# initialize a list to hold all the tweepy Tweets
__snake_case = []
# make initial request for most recent tweets (200 is the maximum allowed count)
__snake_case = api.user_timeline(screen_name=_UpperCAmelCase , count=2_00 )
# save most recent tweets
alltweets.extend(_UpperCAmelCase )
# save the id of the oldest tweet less one
__snake_case = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(_UpperCAmelCase ) > 0:
print(F'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
__snake_case = api.user_timeline(
screen_name=_UpperCAmelCase , count=2_00 , max_id=_UpperCAmelCase )
# save most recent tweets
alltweets.extend(_UpperCAmelCase )
# update the id of the oldest tweet less one
__snake_case = alltweets[-1].id - 1
print(F'''...{len(_UpperCAmelCase )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
__snake_case = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F'''new_{screen_name}_tweets.csv''' , "w" ) as f:
__snake_case = csv.writer(_UpperCAmelCase )
writer.writerow(["id", "created_at", "text"] )
writer.writerows(_UpperCAmelCase )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''')
| 680
|
'''simple docstring'''
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
a : Any = 6_378_137.0
a : List[Any] = 6_356_752.314_245
a : Dict = 6_378_137
def __UpperCAmelCase ( _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float ) -> float:
__snake_case = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
__snake_case = atan((1 - flattening) * tan(radians(_UpperCAmelCase ) ) )
__snake_case = atan((1 - flattening) * tan(radians(_UpperCAmelCase ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
__snake_case = haversine_distance(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
__snake_case = (b_lata + b_lata) / 2
__snake_case = (b_lata - b_lata) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
__snake_case = (sin(_UpperCAmelCase ) ** 2) * (cos(_UpperCAmelCase ) ** 2)
__snake_case = cos(sigma / 2 ) ** 2
__snake_case = (sigma - sin(_UpperCAmelCase )) * (x_numerator / x_demonimator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
__snake_case = (cos(_UpperCAmelCase ) ** 2) * (sin(_UpperCAmelCase ) ** 2)
__snake_case = sin(sigma / 2 ) ** 2
__snake_case = (sigma + sin(_UpperCAmelCase )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_UpperCamelCase : List[str] = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : List[str] = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : List[str] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
_UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 541
|
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
| 36
| 0
|
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class _UpperCamelCase :
'''simple docstring'''
def __init__( self , _a , _a=13 , _a=2 , _a=24 , _a=16 , _a=True , _a=True , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=10 , _a=0.02 , _a=None , _a=2 , _a=2 , ):
"""simple docstring"""
a__ = parent
a__ = batch_size
a__ = patch_size
a__ = max_length
a__ = num_mel_bins
a__ = is_training
a__ = use_labels
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = type_sequence_label_size
a__ = initializer_range
a__ = scope
a__ = frequency_stride
a__ = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
a__ = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
a__ = (self.max_length - self.patch_size) // self.time_stride + 1
a__ = frequency_out_dimension * time_out_dimension
a__ = num_patches + 2
def lowercase__ ( self ):
"""simple docstring"""
a__ = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
a__ = None
if self.use_labels:
a__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a__ = self.get_config()
return config, input_values, labels
def lowercase__ ( self ):
"""simple docstring"""
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_a , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def lowercase__ ( self , _a , _a , _a ):
"""simple docstring"""
a__ = ASTModel(config=_a )
model.to(_a )
model.eval()
a__ = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self ):
"""simple docstring"""
a__ = self.prepare_config_and_inputs()
(
(
a__
) , (
a__
) , (
a__
) ,
) = config_and_inputs
a__ = {'input_values': input_values}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( _A , _A , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE:Union[str, Any] = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE:Any = (
{'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE:List[str] = False
SCREAMING_SNAKE_CASE:str = False
SCREAMING_SNAKE_CASE:Any = False
SCREAMING_SNAKE_CASE:Dict = False
def lowercase__ ( self , _a , _a , _a , _a , _a ):
"""simple docstring"""
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def lowercase__ ( self ):
"""simple docstring"""
a__ = ASTModelTester(self )
a__ = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def lowercase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='AST does not use inputs_embeds' )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
a__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , nn.Linear ) )
def lowercase__ ( self ):
"""simple docstring"""
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ = model_class(_a )
a__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ = [*signature.parameters.keys()]
a__ = ['input_values']
self.assertListEqual(arg_names[:1] , _a )
def lowercase__ ( self ):
"""simple docstring"""
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ = ASTModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowerCAmelCase_ ( ):
a__ = hf_hub_download(
repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' )
a__ , a__ = torchaudio.load(a )
return audio, sampling_rate
@require_torch
@require_torchaudio
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowercase__ ( self ):
"""simple docstring"""
return (
ASTFeatureExtractor.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' )
if is_torchaudio_available()
else None
)
@slow
def lowercase__ ( self ):
"""simple docstring"""
a__ = self.default_feature_extractor
a__ = ASTForAudioClassification.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' ).to(_a )
a__ = self.default_feature_extractor
a__ , a__ = prepare_audio()
a__ = audio.squeeze().numpy()
a__ = feature_extractor(_a , sampling_rate=_a , return_tensors='pt' ).to(_a )
# forward pass
with torch.no_grad():
a__ = model(**_a )
# verify the logits
a__ = torch.Size((1, 527) )
self.assertEqual(outputs.logits.shape , _a )
a__ = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
| 126
|
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
__A : Any = logging.get_logger(__name__)
__A : Union[str, Any] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
__A : Any = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def lowerCAmelCase_ ( a : Any , a : Tuple , a : Tuple , a : str , a : int ):
for attribute in key.split('.' ):
a__ = getattr(a , a )
if weight_type is not None:
a__ = getattr(a , a ).shape
else:
a__ = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
a__ = value
elif weight_type == "weight_g":
a__ = value
elif weight_type == "weight_v":
a__ = value
elif weight_type == "bias":
a__ = value
else:
a__ = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def lowerCAmelCase_ ( a : Union[str, Any] , a : List[str] ):
a__ = []
a__ = fairseq_model.state_dict()
a__ = hf_model.feature_extractor
for name, value in fairseq_dict.items():
a__ = False
if "conv_layers" in name:
load_conv_layer(
a , a , a , a , hf_model.config.feat_extract_norm == 'group' , )
a__ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
a__ = True
if "*" in mapped_key:
a__ = name.split(a )[0].split('.' )[-2]
a__ = mapped_key.replace('*' , a )
if "weight_g" in name:
a__ = 'weight_g'
elif "weight_v" in name:
a__ = 'weight_v'
elif "bias" in name and "relative_attention_bias" not in name:
a__ = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
a__ = 'weight'
else:
a__ = None
set_recursively(a , a , a , a , a )
continue
if not is_used:
unused_weights.append(a )
logger.warning(f'''Unused weights: {unused_weights}''' )
def lowerCAmelCase_ ( a : Dict , a : Union[str, Any] , a : Optional[Any] , a : Union[str, Any] , a : Optional[int] ):
a__ = full_name.split('conv_layers.' )[-1]
a__ = name.split('.' )
a__ = int(items[0] )
a__ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
a__ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
a__ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
a__ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
a__ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(a )
@torch.no_grad()
def lowerCAmelCase_ ( a : Optional[int] , a : Optional[Any] , a : List[Any]=None ):
# load the pre-trained checkpoints
a__ = torch.load(a )
a__ = WavLMConfigOrig(checkpoint['cfg'] )
a__ = WavLMOrig(a )
model.load_state_dict(checkpoint['model'] )
model.eval()
if config_path is not None:
a__ = WavLMConfig.from_pretrained(a )
else:
a__ = WavLMConfig()
a__ = WavLMModel(a )
recursively_load_weights(a , a )
hf_wavlm.save_pretrained(a )
if __name__ == "__main__":
__A : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
__A : Dict = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 126
| 1
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCamelCase__ : Optional[Any] = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = 42
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Optional[Any] , _lowerCAmelCase : PriorTransformer , _lowerCAmelCase : CLIPVisionModel , _lowerCAmelCase : CLIPImageProcessor , _lowerCAmelCase : HeunDiscreteScheduler , _lowerCAmelCase : ShapERenderer , ):
super().__init__()
self.register_modules(
prior=_lowerCAmelCase , image_encoder=_lowerCAmelCase , image_processor=_lowerCAmelCase , scheduler=_lowerCAmelCase , renderer=_lowerCAmelCase , )
def lowerCAmelCase_ ( self : Tuple , _lowerCAmelCase : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] ):
if latents is None:
SCREAMING_SNAKE_CASE_ = randn_tensor(_lowerCAmelCase , generator=_lowerCAmelCase , device=_lowerCAmelCase , dtype=_lowerCAmelCase )
else:
if latents.shape != shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}" )
SCREAMING_SNAKE_CASE_ = latents.to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = latents * scheduler.init_noise_sigma
return latents
def lowerCAmelCase_ ( self : Tuple , _lowerCAmelCase : List[Any]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
SCREAMING_SNAKE_CASE_ = torch.device(F"cuda:{gpu_id}" )
SCREAMING_SNAKE_CASE_ = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_lowerCAmelCase , _lowerCAmelCase )
@property
def lowerCAmelCase_ ( self : int ):
if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(_lowerCAmelCase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : List[str] , ):
if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and isinstance(image[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE_ = torch.cat(_lowerCAmelCase , axis=0 ) if image[0].ndim == 4 else torch.stack(_lowerCAmelCase , axis=0 )
if not isinstance(_lowerCAmelCase , torch.Tensor ):
SCREAMING_SNAKE_CASE_ = self.image_processor(_lowerCAmelCase , return_tensors='pt' ).pixel_values[0].unsqueeze(0 )
SCREAMING_SNAKE_CASE_ = image.to(dtype=self.image_encoder.dtype , device=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.image_encoder(_lowerCAmelCase )['last_hidden_state']
SCREAMING_SNAKE_CASE_ = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
SCREAMING_SNAKE_CASE_ = image_embeds.repeat_interleave(_lowerCAmelCase , dim=0 )
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE_ = torch.zeros_like(_lowerCAmelCase )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
SCREAMING_SNAKE_CASE_ = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(_lowerCAmelCase )
def __call__( self : Tuple , _lowerCAmelCase : Union[PIL.Image.Image, List[PIL.Image.Image]] , _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = 25 , _lowerCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowerCAmelCase : Optional[torch.FloatTensor] = None , _lowerCAmelCase : float = 4.0 , _lowerCAmelCase : int = 64 , _lowerCAmelCase : Optional[str] = "pil" , _lowerCAmelCase : bool = True , ):
if isinstance(_lowerCAmelCase , PIL.Image.Image ):
SCREAMING_SNAKE_CASE_ = 1
elif isinstance(_lowerCAmelCase , torch.Tensor ):
SCREAMING_SNAKE_CASE_ = image.shape[0]
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
SCREAMING_SNAKE_CASE_ = len(_lowerCAmelCase )
else:
raise ValueError(
F"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_lowerCAmelCase )}" )
SCREAMING_SNAKE_CASE_ = self._execution_device
SCREAMING_SNAKE_CASE_ = batch_size * num_images_per_prompt
SCREAMING_SNAKE_CASE_ = guidance_scale > 1.0
SCREAMING_SNAKE_CASE_ = self._encode_image(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# prior
self.scheduler.set_timesteps(_lowerCAmelCase , device=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.scheduler.timesteps
SCREAMING_SNAKE_CASE_ = self.prior.config.num_embeddings
SCREAMING_SNAKE_CASE_ = self.prior.config.embedding_dim
SCREAMING_SNAKE_CASE_ = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
SCREAMING_SNAKE_CASE_ = latents.reshape(latents.shape[0] , _lowerCAmelCase , _lowerCAmelCase )
for i, t in enumerate(self.progress_bar(_lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE_ = self.scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.prior(
_lowerCAmelCase , timestep=_lowerCAmelCase , proj_embedding=_lowerCAmelCase , ).predicted_image_embedding
# remove the variance
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance is not None:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE_ = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
SCREAMING_SNAKE_CASE_ = self.scheduler.step(
_lowerCAmelCase , timestep=_lowerCAmelCase , sample=_lowerCAmelCase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = []
for i, latent in enumerate(_lowerCAmelCase ):
print()
SCREAMING_SNAKE_CASE_ = self.renderer.decode(
latent[None, :] , _lowerCAmelCase , size=_lowerCAmelCase , ray_batch_size=4_096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.stack(_lowerCAmelCase )
if output_type not in ["np", "pil"]:
raise ValueError(F"Only the output types `pil` and `np` are supported not output_type={output_type}" )
SCREAMING_SNAKE_CASE_ = images.cpu().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE_ = [self.numpy_to_pil(_lowerCAmelCase ) for image in images]
# Offload last model to CPU
if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=_lowerCAmelCase )
| 31
|
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
__lowerCamelCase : List[Any] = random.Random()
def lowerCamelCase_(lowerCamelCase_ , lowerCamelCase_=1.0 , lowerCamelCase_=None , lowerCamelCase_=None ) -> Tuple:
if rng is None:
UpperCAmelCase = global_rng
UpperCAmelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class __magic_name__ ( unittest.TestCase ):
def __init__( self : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : Any=7 , UpperCamelCase__ : Union[str, Any]=4_00 , UpperCamelCase__ : Optional[Any]=20_00 , UpperCamelCase__ : str=1 , UpperCamelCase__ : Any=0.0 , UpperCamelCase__ : int=1_60_00 , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Dict=80 , UpperCamelCase__ : List[str]=16 , UpperCamelCase__ : int=64 , UpperCamelCase__ : Dict="hann_window" , UpperCamelCase__ : Dict=80 , UpperCamelCase__ : Any=76_00 , UpperCamelCase__ : List[str]=1e-1_0 , UpperCamelCase__ : Optional[int]=True , ) -> List[str]:
'''simple docstring'''
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = min_seq_length
UpperCAmelCase = max_seq_length
UpperCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCAmelCase = feature_size
UpperCAmelCase = padding_value
UpperCAmelCase = sampling_rate
UpperCAmelCase = do_normalize
UpperCAmelCase = num_mel_bins
UpperCAmelCase = hop_length
UpperCAmelCase = win_length
UpperCAmelCase = win_function
UpperCAmelCase = fmin
UpperCAmelCase = fmax
UpperCAmelCase = mel_floor
UpperCAmelCase = return_attention_mask
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Tuple:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : List[str]=False ) -> Tuple:
'''simple docstring'''
def _flatten(UpperCamelCase__ : List[Any] ):
return list(itertools.chain(*UpperCamelCase__ ) )
if equal_length:
UpperCAmelCase = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
UpperCAmelCase = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
UpperCAmelCase = [np.asarray(UpperCamelCase__ ) for x in speech_inputs]
return speech_inputs
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : int=False ) -> Dict:
'''simple docstring'''
if equal_length:
UpperCAmelCase = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCAmelCase = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
UpperCAmelCase = [np.asarray(UpperCamelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
class __magic_name__ ( A__, unittest.TestCase ):
lowercase : Optional[Any] =SpeechTaFeatureExtractor
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase = SpeechTaFeatureExtractionTester(self )
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCamelCase__ : Dict ) -> Union[str, Any]:
'''simple docstring'''
self.assertTrue(np.all(np.mean(UpperCamelCase__ , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(UpperCamelCase__ , axis=0 ) - 1 ) < 1e-3 ) )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
UpperCAmelCase = [np.asarray(UpperCamelCase__ ) for speech_input in speech_inputs]
# Test not batched input
UpperCAmelCase = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
UpperCAmelCase = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
# Test batched
UpperCAmelCase = feat_extract(UpperCamelCase__ , return_tensors="np" ).input_values
UpperCAmelCase = feat_extract(UpperCamelCase__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCamelCase__ , UpperCamelCase__ ):
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Dict:
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
UpperCAmelCase = ["longest", "max_length", "do_not_pad"]
UpperCAmelCase = [None, 16_00, None]
for max_length, padding in zip(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase = feat_extract(UpperCamelCase__ , padding=UpperCamelCase__ , max_length=UpperCamelCase__ , return_tensors="np" )
UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00] )
self.assertTrue(input_values[0][8_00:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:10_00] )
self.assertTrue(input_values[0][10_00:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:12_00] )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase = range(8_00 , 14_00 , 2_00 )
UpperCAmelCase = [floats_list((1, x) )[0] for x in lengths]
UpperCAmelCase = ["longest", "max_length", "do_not_pad"]
UpperCAmelCase = [None, 16_00, None]
for max_length, padding in zip(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase = feat_extract(UpperCamelCase__ , max_length=UpperCamelCase__ , padding=UpperCamelCase__ )
UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00] )
self._check_zero_mean_unit_variance(input_values[1][:10_00] )
self._check_zero_mean_unit_variance(input_values[2][:12_00] )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> int:
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
UpperCAmelCase = feat_extract(
UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=10_00 , padding="max_length" , return_tensors="np" )
UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
UpperCAmelCase = feat_extract(
UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=10_00 , padding="longest" , return_tensors="np" )
UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1, :10_00] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 10_00) )
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
UpperCAmelCase = feat_extract(
UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=20_00 , padding="longest" , return_tensors="np" )
UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1, :10_00] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 12_00) )
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> str:
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase = np.random.rand(1_00 ).astype(np.floataa )
UpperCAmelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCAmelCase = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
UpperCAmelCase = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Any:
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCAmelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
UpperCAmelCase = [np.asarray(UpperCamelCase__ ) for speech_input in speech_inputs]
# Test feature size
UpperCAmelCase = feature_extractor(audio_target=UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors="np" ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
UpperCAmelCase = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_values
UpperCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
# Test batched
UpperCAmelCase = feature_extractor(UpperCamelCase__ , return_tensors="np" ).input_values
UpperCAmelCase = feature_extractor(UpperCamelCase__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCamelCase__ , UpperCamelCase__ ):
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCAmelCase = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
UpperCAmelCase = np.asarray(UpperCamelCase__ )
UpperCAmelCase = feature_extractor(UpperCamelCase__ , return_tensors="np" ).input_values
UpperCAmelCase = feature_extractor(UpperCamelCase__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCamelCase__ , UpperCamelCase__ ):
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase = feat_extract.model_input_names[0]
UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(UpperCamelCase__ ) == len(UpperCamelCase__ ) for x, y in zip(UpperCamelCase__ , processed_features[input_name] ) ) )
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target(equal_length=UpperCamelCase__ )
UpperCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type="np" )
UpperCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target(equal_length=UpperCamelCase__ )
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase = feat_extract.model_input_names[0]
UpperCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type="pt" )
UpperCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase = feat_extract.model_input_names[0]
UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase = feat_extract.num_mel_bins # hack!
UpperCAmelCase = feat_extract.pad(UpperCamelCase__ , padding="longest" , return_tensors="np" )[input_name]
UpperCAmelCase = feat_extract.pad(UpperCamelCase__ , padding="longest" , return_tensors="pt" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> List[str]:
'''simple docstring'''
UpperCAmelCase = self.feat_extract_dict
UpperCAmelCase = True
UpperCAmelCase = self.feature_extraction_class(**UpperCamelCase__ )
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase = [len(UpperCamelCase__ ) for x in speech_inputs]
UpperCAmelCase = feat_extract.model_input_names[0]
UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase = feat_extract.num_mel_bins # hack!
UpperCAmelCase = feat_extract.pad(UpperCamelCase__ , padding="longest" , return_tensors="np" )
self.assertIn("attention_mask" , UpperCamelCase__ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase = self.feat_extract_dict
UpperCAmelCase = True
UpperCAmelCase = self.feature_extraction_class(**UpperCamelCase__ )
UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase = [len(UpperCamelCase__ ) for x in speech_inputs]
UpperCAmelCase = feat_extract.model_input_names[0]
UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase = min(UpperCamelCase__ )
UpperCAmelCase = feat_extract.num_mel_bins # hack!
UpperCAmelCase = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , max_length=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors="np" )
self.assertIn("attention_mask" , UpperCamelCase__ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCamelCase__ : str ) -> Any:
'''simple docstring'''
from datasets import load_dataset
UpperCAmelCase = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
UpperCAmelCase = ds.sort("id" ).select(range(UpperCamelCase__ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase = torch.tensor(
[2.3_8_0_4e-0_3, 2.0_7_5_2e-0_3, 1.9_8_3_6e-0_3, 2.1_0_5_7e-0_3, 1.6_1_7_4e-0_3,
3.0_5_1_8e-0_4, 9.1_5_5_3e-0_5, 3.3_5_6_9e-0_4, 9.7_6_5_6e-0_4, 1.8_3_1_1e-0_3,
2.0_1_4_2e-0_3, 2.1_0_5_7e-0_3, 1.7_3_9_5e-0_3, 4.5_7_7_6e-0_4, -3.9_6_7_3e-0_4,
4.5_7_7_6e-0_4, 1.0_0_7_1e-0_3, 9.1_5_5_3e-0_5, 4.8_8_2_8e-0_4, 1.1_5_9_7e-0_3,
7.3_2_4_2e-0_4, 9.4_6_0_4e-0_4, 1.8_0_0_5e-0_3, 1.8_3_1_1e-0_3, 8.8_5_0_1e-0_4,
4.2_7_2_5e-0_4, 4.8_8_2_8e-0_4, 7.3_2_4_2e-0_4, 1.0_9_8_6e-0_3, 2.1_0_5_7e-0_3] )
# fmt: on
UpperCAmelCase = self._load_datasamples(1 )
UpperCAmelCase = SpeechTaFeatureExtractor()
UpperCAmelCase = feature_extractor(UpperCamelCase__ , return_tensors="pt" ).input_values
self.assertEquals(input_values.shape , (1, 9_36_80) )
self.assertTrue(torch.allclose(input_values[0, :30] , UpperCamelCase__ , atol=1e-6 ) )
def SCREAMING_SNAKE_CASE_ ( self : int ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase = torch.tensor(
[-2.68_70, -3.01_04, -3.13_56, -3.53_52, -3.00_44, -3.03_53, -3.47_19, -3.67_77,
-3.15_20, -2.94_35, -2.65_53, -2.87_95, -2.99_44, -2.59_21, -3.02_79, -3.03_86,
-3.08_64, -3.12_91, -3.23_53, -2.74_44, -2.68_31, -2.72_87, -3.17_61, -3.15_71,
-3.27_26, -3.05_82, -3.10_07, -3.45_33, -3.46_95, -3.09_98] )
# fmt: on
UpperCAmelCase = self._load_datasamples(1 )
UpperCAmelCase = SpeechTaFeatureExtractor()
UpperCAmelCase = feature_extractor(audio_target=UpperCamelCase__ , return_tensors="pt" ).input_values
self.assertEquals(input_values.shape , (1, 3_66, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , UpperCamelCase__ , atol=1e-4 ) )
| 323
| 0
|
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def lowerCAmelCase ( UpperCAmelCase ) ->bool:
"""simple docstring"""
__magic_name__ : Union[str, Any] = int(number**0.5 )
return number == sq * sq
def lowerCAmelCase ( UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase ) ->tuple[int, int]:
"""simple docstring"""
__magic_name__ : List[Any] = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
__magic_name__ : List[str] = x_den * y_den * z_den
__magic_name__ : Union[str, Any] = gcd(__UpperCamelCase, __UpperCamelCase )
top //= hcf
bottom //= hcf
return top, bottom
def lowerCAmelCase ( UpperCAmelCase = 35 ) ->int:
"""simple docstring"""
__magic_name__ : Optional[Any] = set()
__magic_name__ : int = 42
__magic_name__ : Optional[int] = Fraction(0 )
__magic_name__ : Dict = 42
for x_num in range(1, order + 1 ):
for x_den in range(x_num + 1, order + 1 ):
for y_num in range(1, order + 1 ):
for y_den in range(y_num + 1, order + 1 ):
# n=1
__magic_name__ : Union[str, Any] = x_num * y_den + x_den * y_num
__magic_name__ : Any = x_den * y_den
__magic_name__ : List[str] = gcd(__UpperCamelCase, __UpperCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
__magic_name__ : Optional[int] = add_three(
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase )
unique_s.add(__UpperCamelCase )
# n=2
__magic_name__ : str = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
__magic_name__ : str = x_den * x_den * y_den * y_den
if is_sq(__UpperCamelCase ) and is_sq(__UpperCamelCase ):
__magic_name__ : Any = int(sqrt(__UpperCamelCase ) )
__magic_name__ : Optional[Any] = int(sqrt(__UpperCamelCase ) )
__magic_name__ : List[Any] = gcd(__UpperCamelCase, __UpperCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
__magic_name__ : List[Any] = add_three(
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase )
unique_s.add(__UpperCamelCase )
# n=-1
__magic_name__ : List[str] = x_num * y_num
__magic_name__ : Union[str, Any] = x_den * y_num + x_num * y_den
__magic_name__ : str = gcd(__UpperCamelCase, __UpperCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
__magic_name__ : Dict = add_three(
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase )
unique_s.add(__UpperCamelCase )
# n=2
__magic_name__ : Tuple = x_num * x_num * y_num * y_num
__magic_name__ : Dict = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(__UpperCamelCase ) and is_sq(__UpperCamelCase ):
__magic_name__ : int = int(sqrt(__UpperCamelCase ) )
__magic_name__ : List[str] = int(sqrt(__UpperCamelCase ) )
__magic_name__ : Optional[int] = gcd(__UpperCamelCase, __UpperCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
__magic_name__ : Optional[Any] = add_three(
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase )
unique_s.add(__UpperCamelCase )
for num, den in unique_s:
total += Fraction(__UpperCamelCase, __UpperCamelCase )
return total.denominator + total.numerator
if __name__ == "__main__":
print(f"{solution() = }")
| 708
|
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowercase_ = 16
lowercase_ = 32
def lowerCAmelCase ( UpperCAmelCase, UpperCAmelCase = 16 ) ->Any:
"""simple docstring"""
__magic_name__ : List[Any] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
__magic_name__ : Tuple = load_dataset('''glue''', '''mrpc''' )
def tokenize_function(UpperCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
__magic_name__ : Any = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=UpperCAmelCase, max_length=UpperCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__magic_name__ : Tuple = datasets.map(
UpperCAmelCase, batched=UpperCAmelCase, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__magic_name__ : str = tokenized_datasets.rename_column('''label''', '''labels''' )
def collate_fn(UpperCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__magic_name__ : Optional[int] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__magic_name__ : Any = 16
elif accelerator.mixed_precision != "no":
__magic_name__ : Union[str, Any] = 8
else:
__magic_name__ : Optional[Any] = None
return tokenizer.pad(
UpperCAmelCase, padding='''longest''', max_length=UpperCAmelCase, pad_to_multiple_of=UpperCAmelCase, return_tensors='''pt''', )
# Instantiate dataloaders.
__magic_name__ : List[Any] = DataLoader(
tokenized_datasets['''train'''], shuffle=UpperCAmelCase, collate_fn=UpperCAmelCase, batch_size=UpperCAmelCase, drop_last=UpperCAmelCase )
__magic_name__ : Union[str, Any] = DataLoader(
tokenized_datasets['''validation'''], shuffle=UpperCAmelCase, collate_fn=UpperCAmelCase, batch_size=UpperCAmelCase, drop_last=(accelerator.mixed_precision == '''fp8'''), )
return train_dataloader, eval_dataloader
def lowerCAmelCase ( UpperCAmelCase, UpperCAmelCase ) ->Optional[Any]:
"""simple docstring"""
__magic_name__ : Union[str, Any] = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__magic_name__ : int = config['''lr''']
__magic_name__ : Any = int(config['''num_epochs'''] )
__magic_name__ : List[str] = int(config['''seed'''] )
__magic_name__ : Optional[int] = int(config['''batch_size'''] )
__magic_name__ : Optional[Any] = evaluate.load('''glue''', '''mrpc''' )
# If the batch size is too big we use gradient accumulation
__magic_name__ : List[str] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
__magic_name__ : List[str] = batch_size // MAX_GPU_BATCH_SIZE
__magic_name__ : str = MAX_GPU_BATCH_SIZE
set_seed(UpperCAmelCase )
__magic_name__ , __magic_name__ : int = get_dataloaders(UpperCAmelCase, UpperCAmelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__magic_name__ : Optional[Any] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=UpperCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__magic_name__ : Any = model.to(accelerator.device )
# Instantiate optimizer
__magic_name__ : int = AdamW(params=model.parameters(), lr=UpperCAmelCase )
# Instantiate scheduler
__magic_name__ : Tuple = get_linear_schedule_with_warmup(
optimizer=UpperCAmelCase, num_warmup_steps=100, num_training_steps=(len(UpperCAmelCase ) * num_epochs) // gradient_accumulation_steps, )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Optional[Any] = accelerator.prepare(
UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase )
# Now we train the model
for epoch in range(UpperCAmelCase ):
model.train()
for step, batch in enumerate(UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
__magic_name__ : Dict = model(**UpperCAmelCase )
__magic_name__ : Tuple = outputs.loss
__magic_name__ : Optional[int] = loss / gradient_accumulation_steps
accelerator.backward(UpperCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__magic_name__ : Dict = model(**UpperCAmelCase )
__magic_name__ : List[str] = outputs.logits.argmax(dim=-1 )
__magic_name__ , __magic_name__ : Optional[int] = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=UpperCAmelCase, references=UpperCAmelCase, )
__magic_name__ : str = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''', UpperCAmelCase )
def lowerCAmelCase ( ) ->Optional[Any]:
"""simple docstring"""
__magic_name__ : List[str] = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''', type=UpperCAmelCase, default=UpperCAmelCase, choices=['''no''', '''fp16''', '''bf16''', '''fp8'''], help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''', )
parser.add_argument('''--cpu''', action='''store_true''', help='''If passed, will train on the CPU.''' )
__magic_name__ : Dict = parser.parse_args()
__magic_name__ : int = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(UpperCAmelCase, UpperCAmelCase )
if __name__ == "__main__":
main()
| 336
| 0
|
"""simple docstring"""
from statistics import mean
import numpy as np
def UpperCAmelCase ( snake_case : list , snake_case : list , snake_case : list , snake_case : int ):
_lowerCAmelCase:str = 0
# Number of processes finished
_lowerCAmelCase:int = 0
# Displays the finished process.
# If it is 0, the performance is completed if it is 1, before the performance.
_lowerCAmelCase:Tuple = [0] * no_of_process
# List to include calculation results
_lowerCAmelCase:Optional[int] = [0] * no_of_process
# Sort by arrival time.
_lowerCAmelCase:str = [burst_time[i] for i in np.argsort(snake_case )]
_lowerCAmelCase:Tuple = [process_name[i] for i in np.argsort(snake_case )]
arrival_time.sort()
while no_of_process > finished_process_count:
_lowerCAmelCase:str = 0
while finished_process[i] == 1:
i += 1
if current_time < arrival_time[i]:
_lowerCAmelCase:int = arrival_time[i]
_lowerCAmelCase:Dict = 0
# Index showing the location of the process being performed
_lowerCAmelCase:List[Any] = 0
# Saves the current response ratio.
_lowerCAmelCase:str = 0
for i in range(0 , snake_case ):
if finished_process[i] == 0 and arrival_time[i] <= current_time:
_lowerCAmelCase:Dict = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
i
]
if response_ratio < temp:
_lowerCAmelCase:Optional[int] = temp
_lowerCAmelCase:str = i
# Calculate the turn around time
_lowerCAmelCase:Any = current_time + burst_time[loc] - arrival_time[loc]
current_time += burst_time[loc]
# Indicates that the process has been performed.
_lowerCAmelCase:Tuple = 1
# Increase finished_process_count by 1
finished_process_count += 1
return turn_around_time
def UpperCAmelCase ( snake_case : list , snake_case : list , snake_case : list , snake_case : int ):
_lowerCAmelCase:Any = [0] * no_of_process
for i in range(0 , snake_case ):
_lowerCAmelCase:Union[str, Any] = turn_around_time[i] - burst_time[i]
return waiting_time
if __name__ == "__main__":
UpperCamelCase__ = 5
UpperCamelCase__ = ['''A''', '''B''', '''C''', '''D''', '''E''']
UpperCamelCase__ = [1, 2, 3, 4, 5]
UpperCamelCase__ = [1, 2, 3, 4, 5]
UpperCamelCase__ = calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
UpperCamelCase__ = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print('''Process name \tArrival time \tBurst time \tTurn around time \tWaiting time''')
for i in range(0, no_of_process):
print(
F"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
F"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
)
print(F"average waiting time : {mean(waiting_time):.5f}")
print(F"average turn around time : {mean(turn_around_time):.5f}")
| 227
|
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def UpperCAmelCase ( snake_case : int , snake_case : int ):
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def UpperCAmelCase ( snake_case : int ):
_lowerCAmelCase:Optional[Any] = []
_lowerCAmelCase:Dict = 11
_lowerCAmelCase:int = int('''1''' + '''0''' * digit_len )
for num in range(snake_case , snake_case ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(snake_case , snake_case ):
solutions.append(F'{num}/{den}' )
den += 1
num += 1
_lowerCAmelCase:Optional[Any] = 10
return solutions
def UpperCAmelCase ( snake_case : int = 2 ):
_lowerCAmelCase:Optional[int] = 1.0
for fraction in fraction_list(snake_case ):
_lowerCAmelCase:Any = Fraction(snake_case )
result *= frac.denominator / frac.numerator
return int(snake_case )
if __name__ == "__main__":
print(solution())
| 227
| 1
|
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : str=13 , UpperCAmelCase_ : int=3 , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : List[str]=224 , UpperCAmelCase_ : Any=1_000 , UpperCAmelCase_ : Union[str, Any]=[3, 3, 6, 4] , UpperCAmelCase_ : Any=[48, 56, 112, 220] , ):
"""simple docstring"""
__UpperCAmelCase : Dict = parent
__UpperCAmelCase : Optional[Any] = batch_size
__UpperCAmelCase : Optional[int] = num_channels
__UpperCAmelCase : Any = is_training
__UpperCAmelCase : Any = use_labels
__UpperCAmelCase : Dict = hidden_dropout_prob
__UpperCAmelCase : List[str] = attention_probs_dropout_prob
__UpperCAmelCase : Any = num_labels
__UpperCAmelCase : List[Any] = image_size
__UpperCAmelCase : List[str] = layer_depths
__UpperCAmelCase : List[str] = embed_dims
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
__UpperCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : str = None
if self.use_labels:
__UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.num_labels )
__UpperCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="gelu" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=UpperCAmelCase_ , layer_scale_init_value=1e-5 , )
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str ):
"""simple docstring"""
__UpperCAmelCase : int = SwiftFormerModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
__UpperCAmelCase : Any = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[str] ):
"""simple docstring"""
__UpperCAmelCase : Any = self.num_labels
__UpperCAmelCase : str = SwiftFormerForImageClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
__UpperCAmelCase : Optional[int] = model(UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
__UpperCAmelCase : Dict = SwiftFormerForImageClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
__UpperCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : str = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
((__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase)) : List[str] = self.prepare_config_and_inputs()
__UpperCAmelCase : Tuple = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( snake_case__ ,snake_case__ ,unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE = (
{'''feature-extraction''': SwiftFormerModel, '''image-classification''': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = SwiftFormerModelTester(self )
__UpperCAmelCase : List[str] = ConfigTester(
self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="SwiftFormer does not use inputs_embeds" )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
pass
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Tuple = model_class(UpperCAmelCase_ )
__UpperCAmelCase : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear ) )
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : int = model_class(UpperCAmelCase_ )
__UpperCAmelCase : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : Tuple = [*signature.parameters.keys()]
__UpperCAmelCase : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCAmelCase_ )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
__UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_ )
@slow
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : str = SwiftFormerModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
@unittest.skip(reason="SwiftFormer does not output attentions" )
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
pass
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
def check_hidden_states_output(UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] ):
__UpperCAmelCase : int = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
with torch.no_grad():
__UpperCAmelCase : str = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
__UpperCAmelCase : List[Any] = outputs.hidden_states
__UpperCAmelCase : Union[str, Any] = 8
self.assertEqual(len(UpperCAmelCase_ ) , UpperCAmelCase_ ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(UpperCAmelCase_ ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Optional[int] = True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : Any = True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
def _config_zero_init(UpperCAmelCase_ : List[str] ):
__UpperCAmelCase : int = copy.deepcopy(UpperCAmelCase_ )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(UpperCAmelCase_ , UpperCAmelCase_ , 1e-10 )
if isinstance(getattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) , UpperCAmelCase_ ):
__UpperCAmelCase : str = _config_zero_init(getattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
setattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
return configs_no_init
__UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : List[str] = _config_zero_init(UpperCAmelCase_ )
for model_class in self.all_model_classes:
__UpperCAmelCase : int = model_class(config=UpperCAmelCase_ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
pass
def __UpperCamelCase ( ):
__UpperCAmelCase : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
__UpperCAmelCase : int = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(UpperCAmelCase_ )
__UpperCAmelCase : Dict = self.default_image_processor
__UpperCAmelCase : Union[str, Any] = prepare_img()
__UpperCAmelCase : Optional[Any] = image_processor(images=UpperCAmelCase_ , return_tensors="pt" ).to(UpperCAmelCase_ )
# forward pass
with torch.no_grad():
__UpperCAmelCase : Tuple = model(**UpperCAmelCase_ )
# verify the logits
__UpperCAmelCase : Optional[int] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase_ )
__UpperCAmelCase : str = torch.tensor([[-2.17_03e00, 2.11_07e00, -2.08_11e00]] ).to(UpperCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1e-4 ) )
| 329
|
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny" , from_pt=UpperCAmelCase_ , dtype=jnp.bfloataa )
__UpperCAmelCase , __UpperCAmelCase : List[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=UpperCAmelCase_ , from_pt=UpperCAmelCase_ , dtype=jnp.bfloataa )
__UpperCAmelCase : Any = controlnet_params
__UpperCAmelCase : Tuple = "bird"
__UpperCAmelCase : Optional[Any] = jax.device_count()
__UpperCAmelCase : Tuple = pipe.prepare_text_inputs([prompts] * num_samples )
__UpperCAmelCase : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" )
__UpperCAmelCase : str = pipe.prepare_image_inputs([canny_image] * num_samples )
__UpperCAmelCase : List[str] = jax.random.PRNGKey(0 )
__UpperCAmelCase : Optional[int] = jax.random.split(UpperCAmelCase_ , jax.device_count() )
__UpperCAmelCase : Tuple = replicate(UpperCAmelCase_ )
__UpperCAmelCase : List[Any] = shard(UpperCAmelCase_ )
__UpperCAmelCase : List[str] = shard(UpperCAmelCase_ )
__UpperCAmelCase : Optional[int] = pipe(
prompt_ids=UpperCAmelCase_ , image=UpperCAmelCase_ , params=UpperCAmelCase_ , prng_seed=UpperCAmelCase_ , num_inference_steps=50 , jit=UpperCAmelCase_ , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
__UpperCAmelCase : List[Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__UpperCAmelCase : List[Any] = images[0, 253:256, 253:256, -1]
__UpperCAmelCase : int = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__UpperCAmelCase : int = jnp.array(
[0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078] )
print(f"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase : Tuple = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose" , from_pt=UpperCAmelCase_ , dtype=jnp.bfloataa )
__UpperCAmelCase , __UpperCAmelCase : Tuple = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=UpperCAmelCase_ , from_pt=UpperCAmelCase_ , dtype=jnp.bfloataa )
__UpperCAmelCase : Optional[int] = controlnet_params
__UpperCAmelCase : int = "Chef in the kitchen"
__UpperCAmelCase : Optional[int] = jax.device_count()
__UpperCAmelCase : int = pipe.prepare_text_inputs([prompts] * num_samples )
__UpperCAmelCase : Optional[int] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" )
__UpperCAmelCase : Optional[int] = pipe.prepare_image_inputs([pose_image] * num_samples )
__UpperCAmelCase : Optional[int] = jax.random.PRNGKey(0 )
__UpperCAmelCase : int = jax.random.split(UpperCAmelCase_ , jax.device_count() )
__UpperCAmelCase : Optional[Any] = replicate(UpperCAmelCase_ )
__UpperCAmelCase : Dict = shard(UpperCAmelCase_ )
__UpperCAmelCase : Any = shard(UpperCAmelCase_ )
__UpperCAmelCase : Optional[int] = pipe(
prompt_ids=UpperCAmelCase_ , image=UpperCAmelCase_ , params=UpperCAmelCase_ , prng_seed=UpperCAmelCase_ , num_inference_steps=50 , jit=UpperCAmelCase_ , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
__UpperCAmelCase : Optional[Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__UpperCAmelCase : Tuple = images[0, 253:256, 253:256, -1]
__UpperCAmelCase : Any = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__UpperCAmelCase : int = jnp.array(
[[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]] )
print(f"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 329
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case__ : Tuple = logging.get_logger(__name__)
snake_case__ : List[str] = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class SCREAMING_SNAKE_CASE_ (lowercase_ ):
'''simple docstring'''
_a = "ibert"
def __init__( self : str , __a : int=30_522 , __a : Optional[int]=768 , __a : int=12 , __a : List[str]=12 , __a : Dict=3_072 , __a : str="gelu" , __a : int=0.1 , __a : List[Any]=0.1 , __a : Tuple=512 , __a : str=2 , __a : int=0.02 , __a : List[Any]=1e-12 , __a : Optional[Any]=1 , __a : Union[str, Any]=0 , __a : Optional[Any]=2 , __a : List[Any]="absolute" , __a : int=False , __a : int="none" , **__a : Any , ) ->str:
super().__init__(pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase )
lowerCamelCase_ : List[str] = vocab_size
lowerCamelCase_ : List[Any] = hidden_size
lowerCamelCase_ : Optional[int] = num_hidden_layers
lowerCamelCase_ : Any = num_attention_heads
lowerCamelCase_ : int = hidden_act
lowerCamelCase_ : Optional[int] = intermediate_size
lowerCamelCase_ : Tuple = hidden_dropout_prob
lowerCamelCase_ : List[str] = attention_probs_dropout_prob
lowerCamelCase_ : int = max_position_embeddings
lowerCamelCase_ : str = type_vocab_size
lowerCamelCase_ : Union[str, Any] = initializer_range
lowerCamelCase_ : str = layer_norm_eps
lowerCamelCase_ : Dict = position_embedding_type
lowerCamelCase_ : Optional[int] = quant_mode
lowerCamelCase_ : str = force_dequant
class SCREAMING_SNAKE_CASE_ (lowercase_ ):
'''simple docstring'''
@property
def _lowerCAmelCase ( self : Optional[int] ) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowerCamelCase_ : Optional[int] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCamelCase_ : Tuple = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 278
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class UpperCamelCase ( unittest.TestCase ):
def __init__( self ,__UpperCamelCase ,__UpperCamelCase=7 ,__UpperCamelCase=3 ,__UpperCamelCase=18 ,__UpperCamelCase=30 ,__UpperCamelCase=400 ,__UpperCamelCase=True ,__UpperCamelCase=None ,__UpperCamelCase=True ,__UpperCamelCase=None ,) -> str:
'''simple docstring'''
lowercase_ : Optional[int] = size if size is not None else {'shortest_edge': 20}
lowercase_ : Optional[Any] = crop_size if crop_size is not None else {'height': 18, 'width': 18}
lowercase_ : str = parent
lowercase_ : str = batch_size
lowercase_ : Union[str, Any] = num_channels
lowercase_ : List[str] = image_size
lowercase_ : Dict = min_resolution
lowercase_ : Union[str, Any] = max_resolution
lowercase_ : Dict = do_resize
lowercase_ : Any = size
lowercase_ : str = do_center_crop
lowercase_ : Tuple = crop_size
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class UpperCamelCase ( lowercase_ , unittest.TestCase ):
lowercase = MobileNetVaImageProcessor if is_vision_available() else None
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : Tuple = MobileNetVaImageProcessingTester(self )
@property
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCamelCase ,'do_resize' ) )
self.assertTrue(hasattr(__UpperCamelCase ,'size' ) )
self.assertTrue(hasattr(__UpperCamelCase ,'do_center_crop' ) )
self.assertTrue(hasattr(__UpperCamelCase ,'crop_size' ) )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'shortest_edge': 20} )
self.assertEqual(image_processor.crop_size ,{'height': 18, 'width': 18} )
lowercase_ : Any = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
self.assertEqual(image_processor.size ,{'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size ,{'height': 84, 'width': 84} )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
pass
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase ,Image.Image )
# Test not batched input
lowercase_ : Optional[Any] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
# Test batched
lowercase_ : List[str] = image_processing(__UpperCamelCase ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase_ : Dict = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__UpperCamelCase ,numpify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase ,np.ndarray )
# Test not batched input
lowercase_ : str = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
# Test batched
lowercase_ : Optional[int] = image_processing(__UpperCamelCase ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__UpperCamelCase ,torchify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase ,torch.Tensor )
# Test not batched input
lowercase_ : Dict = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
# Test batched
lowercase_ : Dict = image_processing(__UpperCamelCase ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
| 425
| 0
|
"""simple docstring"""
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowercase__ : int = abspath(join(dirname(dirname(dirname(__file__))), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def __lowercase ( _a ):
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_a )
def __lowercase ( _a ):
from transformers.testing_utils import pytest_terminal_summary_main
snake_case_ : List[Any] = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(_a , id=_a )
| 715
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _UpperCAmelCase ( lowerCAmelCase__):
def _snake_case ( self : int ):
snake_case_ : Any = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowercase_ , '''embed_dim''' ) )
self.parent.assertTrue(hasattr(lowercase_ , '''num_heads''' ) )
class _UpperCAmelCase :
def __init__( self : List[str] , lowercase_ : Optional[Any] , lowercase_ : int=13 , lowercase_ : Optional[int]=64 , lowercase_ : Any=3 , lowercase_ : Any=[16, 48, 96] , lowercase_ : List[Any]=[1, 3, 6] , lowercase_ : Union[str, Any]=[1, 2, 10] , lowercase_ : Optional[Any]=[7, 3, 3] , lowercase_ : Union[str, Any]=[4, 2, 2] , lowercase_ : Tuple=[2, 1, 1] , lowercase_ : List[str]=[2, 2, 2] , lowercase_ : Union[str, Any]=[False, False, True] , lowercase_ : Optional[int]=[0.0, 0.0, 0.0] , lowercase_ : str=0.02 , lowercase_ : Optional[Any]=1E-12 , lowercase_ : Optional[int]=True , lowercase_ : Optional[int]=True , lowercase_ : Optional[Any]=2 , ):
snake_case_ : List[Any] = parent
snake_case_ : int = batch_size
snake_case_ : Union[str, Any] = image_size
snake_case_ : Tuple = patch_sizes
snake_case_ : List[Any] = patch_stride
snake_case_ : Dict = patch_padding
snake_case_ : Any = is_training
snake_case_ : Any = use_labels
snake_case_ : str = num_labels
snake_case_ : Optional[Any] = num_channels
snake_case_ : Optional[Any] = embed_dim
snake_case_ : int = num_heads
snake_case_ : List[str] = stride_kv
snake_case_ : Any = depth
snake_case_ : Dict = cls_token
snake_case_ : Dict = attention_drop_rate
snake_case_ : int = initializer_range
snake_case_ : Tuple = layer_norm_eps
def _snake_case ( self : Dict ):
snake_case_ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ : str = None
if self.use_labels:
# create a random int32 tensor of given shape
snake_case_ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
snake_case_ : Dict = self.get_config()
return config, pixel_values, labels
def _snake_case ( self : int ):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def _snake_case ( self : int , lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] ):
snake_case_ : Tuple = TFCvtModel(config=lowercase_ )
snake_case_ : Tuple = model(lowercase_ , training=lowercase_ )
snake_case_ : int = (self.image_size, self.image_size)
snake_case_, snake_case_ : List[str] = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
snake_case_ : str = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
snake_case_ : str = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def _snake_case ( self : Dict , lowercase_ : Tuple , lowercase_ : Union[str, Any] , lowercase_ : List[str] ):
snake_case_ : int = self.num_labels
snake_case_ : Any = TFCvtForImageClassification(lowercase_ )
snake_case_ : List[Any] = model(lowercase_ , labels=lowercase_ , training=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self : Any ):
snake_case_ : Tuple = self.prepare_config_and_inputs()
snake_case_, snake_case_, snake_case_ : List[str] = config_and_inputs
snake_case_ : Any = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class _UpperCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase):
_lowerCAmelCase : Optional[int] = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
_lowerCAmelCase : str = (
{"""feature-extraction""": TFCvtModel, """image-classification""": TFCvtForImageClassification}
if is_tf_available()
else {}
)
_lowerCAmelCase : str = False
_lowerCAmelCase : int = False
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : int = False
_lowerCAmelCase : int = False
def _snake_case ( self : int ):
snake_case_ : Optional[int] = TFCvtModelTester(self )
snake_case_ : str = TFCvtConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 )
def _snake_case ( self : int ):
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason='''Cvt does not output attentions''' )
def _snake_case ( self : Any ):
pass
@unittest.skip(reason='''Cvt does not use inputs_embeds''' )
def _snake_case ( self : str ):
pass
@unittest.skip(reason='''Cvt does not support input and output embeddings''' )
def _snake_case ( self : Union[str, Any] ):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
def _snake_case ( self : Tuple ):
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
@slow
def _snake_case ( self : Tuple ):
super().test_keras_fit()
@unittest.skip(reason='''Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8''' )
def _snake_case ( self : List[str] ):
snake_case_ : Optional[Any] = tf.keras.mixed_precision.Policy('''mixed_float16''' )
tf.keras.mixed_precision.set_global_policy(lowercase_ )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy('''float32''' )
def _snake_case ( self : int ):
snake_case_, snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Dict = model_class(lowercase_ )
snake_case_ : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ : List[str] = [*signature.parameters.keys()]
snake_case_ : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase_ )
def _snake_case ( self : List[str] ):
def check_hidden_states_output(lowercase_ : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : str ):
snake_case_ : Any = model_class(lowercase_ )
snake_case_ : Optional[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
snake_case_ : Tuple = outputs.hidden_states
snake_case_ : str = len(self.model_tester.depth )
self.assertEqual(len(lowercase_ ) , lowercase_ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
snake_case_, snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Dict = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ : Tuple = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
def _snake_case ( self : str ):
snake_case_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def _snake_case ( self : List[Any] ):
snake_case_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
@slow
def _snake_case ( self : Optional[Any] ):
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : int = TFCvtModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def __lowercase ( ):
snake_case_ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class _UpperCAmelCase ( unittest.TestCase):
@cached_property
def _snake_case ( self : List[Any] ):
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _snake_case ( self : Tuple ):
snake_case_ : int = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
snake_case_ : Any = self.default_image_processor
snake_case_ : Union[str, Any] = prepare_img()
snake_case_ : int = image_processor(images=lowercase_ , return_tensors='''tf''' )
# forward pass
snake_case_ : Tuple = model(**lowercase_ )
# verify the logits
snake_case_ : Optional[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , lowercase_ )
snake_case_ : Tuple = tf.constant([0.92_85, 0.90_15, -0.31_50] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , lowercase_ , atol=1E-4 ) )
| 485
| 0
|
"""simple docstring"""
import string
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__ ) -> None:
for key in range(len(string.ascii_uppercase ) ):
a_ : List[str] = ""
for symbol in message:
if symbol in string.ascii_uppercase:
a_ : List[str] = string.ascii_uppercase.find(SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = num - key
if num < 0:
a_ : Optional[Any] = num + len(string.ascii_uppercase )
a_ : List[Any] = translated + string.ascii_uppercase[num]
else:
a_ : Union[str, Any] = translated + symbol
print(F"""Decryption using Key #{key}: {translated}""" )
def lowerCAmelCase_ ( ) -> None:
a_ : Union[str, Any] = input("Encrypted message: " )
a_ : Any = message.upper()
decrypt(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 237
|
"""simple docstring"""
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class snake_case_ ( a_ ):
__lowerCAmelCase = ["input_values", "attention_mask"]
def __init__( self , a_ = 1 , a_ = 1_6_0_0_0 , a_ = 0.0 , a_ = False , a_ = 8_0 , a_ = 1_6 , a_ = 6_4 , a_ = "hann_window" , a_ = 1.0 , a_ = 8_0 , a_ = 7_6_0_0 , a_ = 1e-10 , a_ = 2 , a_ = True , **a_ , ):
super().__init__(feature_size=a_ , sampling_rate=a_ , padding_value=a_ , **a_ )
a_ : Optional[Any] = do_normalize
a_ : Any = return_attention_mask
a_ : int = num_mel_bins
a_ : int = hop_length
a_ : List[str] = win_length
a_ : Dict = win_function
a_ : Optional[Any] = frame_signal_scale
a_ : List[str] = fmin
a_ : Any = fmax
a_ : str = mel_floor
a_ : int = reduction_factor
a_ : Tuple = win_length * sampling_rate // 1_0_0_0
a_ : int = hop_length * sampling_rate // 1_0_0_0
a_ : Dict = optimal_fft_length(self.sample_size )
a_ : int = (self.n_fft // 2) + 1
a_ : str = window_function(window_length=self.sample_size , name=self.win_function , periodic=a_ )
a_ : List[Any] = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm="slaney" , mel_scale="slaney" , )
if frame_signal_scale != 1.0:
warnings.warn(
"The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers" , a_ , )
if reduction_factor != 2.0:
warnings.warn(
"The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers" , a_ , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def snake_case_ ( a_ , a_ , a_ = 0.0 ):
if attention_mask is not None:
a_ : int = np.array(a_ , np.intaa )
a_ : Tuple = []
for vector, length in zip(a_ , attention_mask.sum(-1 ) ):
a_ : List[str] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
a_ : Tuple = padding_value
normed_input_values.append(a_ )
else:
a_ : Any = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def snake_case_ ( self , a_ , ):
a_ : Optional[Any] = spectrogram(
a_ , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel="log10" , )
return log_mel_spec.T
def __call__( self , a_ = None , a_ = None , a_ = False , a_ = None , a_ = False , a_ = None , a_ = None , a_ = None , a_ = None , **a_ , ):
if audio is None and audio_target is None:
raise ValueError("You must provide either `audio` or `audio_target` values." )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
F""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
F""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
if audio is not None:
a_ : int = self._process_audio(
a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , **a_ , )
else:
a_ : Optional[Any] = None
if audio_target is not None:
a_ : Optional[Any] = self._process_audio(
a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , **a_ , )
if inputs is None:
return inputs_target
else:
a_ : Dict = inputs_target["input_values"]
a_ : int = inputs_target.get("attention_mask" )
if decoder_attention_mask is not None:
a_ : List[Any] = decoder_attention_mask
return inputs
def snake_case_ ( self , a_ , a_ = False , a_ = False , a_ = None , a_ = False , a_ = None , a_ = None , a_ = None , **a_ , ):
a_ : List[str] = isinstance(a_ , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
a_ : Optional[int] = is_batched_numpy or (
isinstance(a_ , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
a_ : str = [np.asarray(a_ , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(a_ , np.ndarray ):
a_ : Dict = np.asarray(a_ , dtype=np.floataa )
elif isinstance(a_ , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
a_ : Tuple = speech.astype(np.floataa )
# always return batch
if not is_batched:
a_ : Union[str, Any] = [speech]
# needed to make pad() work on spectrogram inputs
a_ : List[str] = self.feature_size
# convert into correct format for padding
if is_target:
a_ : Dict = [self._extract_mel_features(a_ ) for waveform in speech]
a_ : List[Any] = BatchFeature({"input_values": features} )
a_ : str = self.num_mel_bins
else:
a_ : List[str] = BatchFeature({"input_values": speech} )
a_ : Any = self.pad(
a_ , padding=a_ , max_length=a_ , truncation=a_ , pad_to_multiple_of=a_ , return_attention_mask=a_ , **a_ , )
a_ : Tuple = feature_size_hack
# convert input values to correct format
a_ : Union[str, Any] = padded_inputs["input_values"]
if not isinstance(input_values[0] , np.ndarray ):
a_ : str = [np.asarray(a_ , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(a_ , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
a_ : Dict = [array.astype(np.floataa ) for array in input_values]
elif isinstance(a_ , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
a_ : int = input_values.astype(np.floataa )
# convert attention_mask to correct format
a_ : Union[str, Any] = padded_inputs.get("attention_mask" )
if attention_mask is not None:
a_ : Union[str, Any] = [np.asarray(a_ , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
a_ : int = (
attention_mask
if self._get_padding_strategies(a_ , max_length=a_ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
a_ : Dict = self.zero_mean_unit_var_norm(
padded_inputs["input_values"] , attention_mask=a_ , padding_value=self.padding_value )
if return_tensors is not None:
a_ : Optional[Any] = padded_inputs.convert_to_tensors(a_ )
return padded_inputs
def snake_case_ ( self ):
a_ : int = super().to_dict()
# Don't serialize these as they are derived from the other properties.
a_ : List[str] = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
for name in names:
if name in output:
del output[name]
return output
| 237
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ :Optional[Any] = {
'configuration_bigbird_pegasus': [
'BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BigBirdPegasusConfig',
'BigBirdPegasusOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :Tuple = [
'BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST',
'BigBirdPegasusForCausalLM',
'BigBirdPegasusForConditionalGeneration',
'BigBirdPegasusForQuestionAnswering',
'BigBirdPegasusForSequenceClassification',
'BigBirdPegasusModel',
'BigBirdPegasusPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
lowercase__ :Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 713
|
"""simple docstring"""
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def lowerCamelCase_ ( UpperCAmelCase_ ) ->Dict:
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = checkpoints.load_tax_checkpoint(UpperCAmelCase_ )
__UpperCAmelCase : Any = flatten_dict(UpperCAmelCase_ )
return flax_params
def lowerCamelCase_ ( UpperCAmelCase_ ) ->List[str]:
"""simple docstring"""
__UpperCAmelCase : Optional[int] = {}
__UpperCAmelCase : Any = {
'''token_embedder''': '''embeddings''',
'''encoder_norm''': '''layernorm''',
'''kernel''': '''weight''',
'''.out''': '''.output''',
'''scale''': '''weight''',
'''embedders_0.pos_embedding''': '''row_embedder.weight''',
'''embedders_1.pos_embedding''': '''column_embedder.weight''',
}
__UpperCAmelCase : Optional[int] = {
'''query''': '''attention.query''',
'''key''': '''attention.key''',
'''value''': '''attention.value''',
'''output.dense''': '''output''',
'''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''',
'''pre_self_attention_layer_norm''': '''self_attention.layer_norm''',
'''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''',
'''mlp.''': '''mlp.DenseReluDense.''',
'''pre_mlp_layer_norm''': '''mlp.layer_norm''',
'''self_attention.o''': '''self_attention.attention.o''',
'''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''',
'''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''',
'''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.logits_dense.weight''': '''decoder.lm_head.weight''',
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
__UpperCAmelCase : Optional[Any] = '''.'''.join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
__UpperCAmelCase : List[str] = new_key.replace(UpperCAmelCase_ , UpperCAmelCase_ )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
__UpperCAmelCase : Dict = new_key.replace(UpperCAmelCase_ , UpperCAmelCase_ )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
__UpperCAmelCase : Dict = re.sub(R'''layers_(\d+)''' , R'''layer.\1''' , UpperCAmelCase_ )
__UpperCAmelCase : Union[str, Any] = new_key.replace('''encoder''' , '''encoder.encoder''' )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
__UpperCAmelCase : Any = re.sub(R'''layers_(\d+)''' , R'''layer.\1''' , UpperCAmelCase_ )
__UpperCAmelCase : Dict = flax_dict[key]
__UpperCAmelCase : Tuple = {}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
__UpperCAmelCase : List[str] = torch.from_numpy(converted_dict[key].T )
else:
__UpperCAmelCase : Optional[int] = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=False , UpperCAmelCase_=False ) ->Dict:
"""simple docstring"""
__UpperCAmelCase : List[Any] = get_flax_param(UpperCAmelCase_ )
if not use_large:
__UpperCAmelCase : List[str] = PixaStructVisionConfig()
__UpperCAmelCase : Optional[Any] = PixaStructTextConfig()
else:
__UpperCAmelCase : Optional[Any] = PixaStructVisionConfig(
hidden_size=15_36 , d_ff=39_68 , num_attention_heads=24 , num_hidden_layers=18 )
__UpperCAmelCase : Optional[Any] = PixaStructTextConfig(hidden_size=15_36 , d_ff=39_68 , num_heads=24 , num_layers=18 )
__UpperCAmelCase : Dict = PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=UpperCAmelCase_ )
__UpperCAmelCase : Optional[Any] = PixaStructForConditionalGeneration(UpperCAmelCase_ )
__UpperCAmelCase : Any = rename_and_convert_flax_params(UpperCAmelCase_ )
model.load_state_dict(UpperCAmelCase_ )
__UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''' )
__UpperCAmelCase : Union[str, Any] = PixaStructImageProcessor()
__UpperCAmelCase : Optional[int] = PixaStructProcessor(image_processor=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ )
if use_large:
__UpperCAmelCase : str = 40_96
__UpperCAmelCase : str = True
# mkdir if needed
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
model.save_pretrained(UpperCAmelCase_ )
processor.save_pretrained(UpperCAmelCase_ )
print('''Model saved in {}'''.format(UpperCAmelCase_ ) )
if __name__ == "__main__":
lowercase__ :List[str] = argparse.ArgumentParser()
parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--use_large', action='store_true', help='Use large model.')
parser.add_argument('--is_vqa', action='store_true', help='Use large model.')
lowercase__ :Optional[Any] = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
| 374
| 0
|
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class _a :
"""simple docstring"""
def __init__( self : int )->Union[str, Any]:
_UpperCAmelCase = psutil.Process()
_UpperCAmelCase = False
def lowercase__ ( self : int )->str:
_UpperCAmelCase = -1
while True:
_UpperCAmelCase = max(self.process.memory_info().rss , self.cpu_memory_peak )
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
def lowercase__ ( self : Any )->Any:
_UpperCAmelCase = True
_UpperCAmelCase = threading.Thread(target=self.peak_monitor )
_UpperCAmelCase = True
self.thread.start()
def lowercase__ ( self : List[Any] )->Optional[int]:
_UpperCAmelCase = False
self.thread.join()
return self.cpu_memory_peak
__A : Any = PeakCPUMemory()
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = {'''time''': time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
_UpperCAmelCase = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
_UpperCAmelCase = torch.cuda.memory_allocated(_SCREAMING_SNAKE_CASE )
torch.cuda.reset_peak_memory_stats()
return measures
def lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
_UpperCAmelCase = {'''time''': time.time() - start_measures['''time''']}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
_UpperCAmelCase = (psutil.Process().memory_info().rss - start_measures['''cpu''']) / 2**20
_UpperCAmelCase = (cpu_peak_tracker.stop() - start_measures['''cpu''']) / 2**20
# GPU mem
for i in range(torch.cuda.device_count() ):
_UpperCAmelCase = (torch.cuda.memory_allocated(_SCREAMING_SNAKE_CASE ) - start_measures[str(_SCREAMING_SNAKE_CASE )]) / 2**20
_UpperCAmelCase = (torch.cuda.max_memory_allocated(_SCREAMING_SNAKE_CASE ) - start_measures[str(_SCREAMING_SNAKE_CASE )]) / 2**20
return measures
def lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
print(f'{description}:' )
print(f'- Time: {measures["time"]:.2f}s' )
for i in range(torch.cuda.device_count() ):
print(f'- GPU {i} allocated: {measures[str(_SCREAMING_SNAKE_CASE )]:.2f}MiB' )
_UpperCAmelCase = measures[f'{i}-peak']
print(f'- GPU {i} peak: {peak:.2f}MiB' )
print(f'- CPU RAM allocated: {measures["cpu"]:.2f}MiB' )
print(f'- CPU RAM peak: {measures["cpu-peak"]:.2f}MiB' )
| 602
|
"""simple docstring"""
from __future__ import annotations
from collections import deque
class _a :
"""simple docstring"""
def __init__( self : int , __UpperCamelCase : list[str] )->Dict:
_UpperCAmelCase = []
self.adlist.append(
{'''value''': '''''', '''next_states''': [], '''fail_state''': 0, '''output''': []} )
for keyword in keywords:
self.add_keyword(__UpperCamelCase )
self.set_fail_transitions()
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : int , __UpperCamelCase : str )->int | None:
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def lowercase__ ( self : Tuple , __UpperCamelCase : str )->None:
_UpperCAmelCase = 0
for character in keyword:
_UpperCAmelCase = self.find_next_state(__UpperCamelCase , __UpperCamelCase )
if next_state is None:
self.adlist.append(
{
'''value''': character,
'''next_states''': [],
'''fail_state''': 0,
'''output''': [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
_UpperCAmelCase = len(self.adlist ) - 1
else:
_UpperCAmelCase = next_state
self.adlist[current_state]["output"].append(__UpperCamelCase )
def lowercase__ ( self : List[str] )->None:
_UpperCAmelCase = deque()
for node in self.adlist[0]["next_states"]:
q.append(__UpperCamelCase )
_UpperCAmelCase = 0
while q:
_UpperCAmelCase = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(__UpperCamelCase )
_UpperCAmelCase = self.adlist[r]['''fail_state''']
while (
self.find_next_state(__UpperCamelCase , self.adlist[child]['''value'''] ) is None
and state != 0
):
_UpperCAmelCase = self.adlist[state]['''fail_state''']
_UpperCAmelCase = self.find_next_state(
__UpperCamelCase , self.adlist[child]['''value'''] )
if self.adlist[child]["fail_state"] is None:
_UpperCAmelCase = 0
_UpperCAmelCase = (
self.adlist[child]['''output''']
+ self.adlist[self.adlist[child]['''fail_state''']]['''output''']
)
def lowercase__ ( self : Any , __UpperCamelCase : str )->dict[str, list[int]]:
_UpperCAmelCase = {} # returns a dict with keywords and list of its occurrences
_UpperCAmelCase = 0
for i in range(len(__UpperCamelCase ) ):
while (
self.find_next_state(__UpperCamelCase , string[i] ) is None
and current_state != 0
):
_UpperCAmelCase = self.adlist[current_state]['''fail_state''']
_UpperCAmelCase = self.find_next_state(__UpperCamelCase , string[i] )
if next_state is None:
_UpperCAmelCase = 0
else:
_UpperCAmelCase = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
_UpperCAmelCase = []
result[key].append(i - len(__UpperCamelCase ) + 1 )
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 602
| 1
|
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"artists_file": "artists.json",
"lyrics_file": "lyrics.json",
"genres_file": "genres.json",
}
UpperCamelCase = {
"artists_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json",
},
"genres_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json",
},
"lyrics_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json",
},
}
UpperCamelCase = {
"jukebox": 512,
}
class lowerCAmelCase_ ( __lowerCAmelCase ):
_UpperCamelCase : int = VOCAB_FILES_NAMES
_UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Tuple = PRETRAINED_LYRIC_TOKENS_SIZES
_UpperCamelCase : Tuple = ['''input_ids''', '''attention_mask''']
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=["v3", "v2", "v2"] , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=5 , _lowerCAmelCase="<|endoftext|>" , **_lowerCAmelCase , ):
_lowercase : Tuple = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else unk_token
super().__init__(
unk_token=lowerCAmelCase_ , n_genres=lowerCAmelCase_ , version=lowerCAmelCase_ , max_n_lyric_tokens=lowerCAmelCase_ , **lowerCAmelCase_ , )
_lowercase : Union[str, Any] = version
_lowercase : List[str] = max_n_lyric_tokens
_lowercase : Tuple = n_genres
with open(lowerCAmelCase_ , encoding='utf-8' ) as vocab_handle:
_lowercase : Union[str, Any] = json.load(lowerCAmelCase_ )
with open(lowerCAmelCase_ , encoding='utf-8' ) as vocab_handle:
_lowercase : Dict = json.load(lowerCAmelCase_ )
with open(lowerCAmelCase_ , encoding='utf-8' ) as vocab_handle:
_lowercase : Optional[Any] = json.load(lowerCAmelCase_ )
_lowercase : Union[str, Any] = r'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'
# In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
if len(self.lyrics_encoder ) == 7_9:
_lowercase : Optional[int] = oov.replace(r'\-\'' , r'\-+\'' )
_lowercase : Optional[int] = regex.compile(lowerCAmelCase_ )
_lowercase : str = {v: k for k, v in self.artists_encoder.items()}
_lowercase : Tuple = {v: k for k, v in self.genres_encoder.items()}
_lowercase : Tuple = {v: k for k, v in self.lyrics_encoder.items()}
@property
def __a ( self ):
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def __a ( self ):
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[int] = [self.artists_encoder.get(lowerCAmelCase_ , 0 ) for artist in list_artists]
for genres in range(len(lowerCAmelCase_ ) ):
_lowercase : Dict = [self.genres_encoder.get(lowerCAmelCase_ , 0 ) for genre in list_genres[genres]]
_lowercase : int = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
_lowercase : Any = [[self.lyrics_encoder.get(lowerCAmelCase_ , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def __a ( self , _lowerCAmelCase ):
return list(lowerCAmelCase_ )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ):
_lowercase , _lowercase , _lowercase : Tuple = self.prepare_for_tokenization(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_lowercase : Tuple = self._tokenize(lowerCAmelCase_ )
return artist, genre, lyrics
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = False ):
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
_lowercase : Tuple = artists[idx].lower()
_lowercase : Tuple = [genres[idx].lower()]
else:
_lowercase : Union[str, Any] = self._normalize(artists[idx] ) + '.v2'
_lowercase : Optional[Any] = [
self._normalize(lowerCAmelCase_ ) + '.v2' for genre in genres[idx].split('_' )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
_lowercase : int = regex.compile(r'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+' )
_lowercase : Any = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'
_lowercase : Optional[int] = {vocab[index]: index + 1 for index in range(len(lowerCAmelCase_ ) )}
_lowercase : Optional[Any] = 0
_lowercase : List[Any] = len(lowerCAmelCase_ ) + 1
_lowercase : Tuple = self.vocab
_lowercase : int = {v: k for k, v in self.vocab.items()}
_lowercase : Union[str, Any] = ''
else:
_lowercase : Any = regex.compile(r'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+' )
_lowercase : List[str] = self._run_strip_accents(lowerCAmelCase_ )
_lowercase : Optional[Any] = lyrics.replace('\\' , '\n' )
_lowercase : int = self.out_of_vocab.sub('' , lowerCAmelCase_ ), [], []
return artists, genres, lyrics
def __a ( self , _lowerCAmelCase ):
_lowercase : Union[str, Any] = unicodedata.normalize('NFD' , lowerCAmelCase_ )
_lowercase : int = []
for char in text:
_lowercase : int = unicodedata.category(lowerCAmelCase_ )
if cat == "Mn":
continue
output.append(lowerCAmelCase_ )
return "".join(lowerCAmelCase_ )
def __a ( self , _lowerCAmelCase ):
_lowercase : Union[str, Any] = (
[chr(lowerCAmelCase_ ) for i in range(ord('a' ) , ord('z' ) + 1 )]
+ [chr(lowerCAmelCase_ ) for i in range(ord('A' ) , ord('Z' ) + 1 )]
+ [chr(lowerCAmelCase_ ) for i in range(ord('0' ) , ord('9' ) + 1 )]
+ ['.']
)
_lowercase : int = frozenset(lowerCAmelCase_ )
_lowercase : List[Any] = re.compile(r'_+' )
_lowercase : Any = ''.join([c if c in accepted else '_' for c in text.lower()] )
_lowercase : Dict = pattern.sub('_' , lowerCAmelCase_ ).strip('_' )
return text
def __a ( self , _lowerCAmelCase ):
return " ".join(lowerCAmelCase_ )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = False ):
# Convert to TensorType
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_lowercase : int = TensorType(lowerCAmelCase_ )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.' )
import tensorflow as tf
_lowercase : Optional[int] = tf.constant
_lowercase : int = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.' )
import torch
_lowercase : List[Any] = torch.tensor
_lowercase : List[str] = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.' )
import jax.numpy as jnp # noqa: F811
_lowercase : int = jnp.array
_lowercase : List[str] = _is_jax
else:
_lowercase : Tuple = np.asarray
_lowercase : Dict = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
_lowercase : List[str] = [inputs]
if not is_tensor(lowerCAmelCase_ ):
_lowercase : Tuple = as_tensor(lowerCAmelCase_ )
except: # noqa E722
raise ValueError(
'Unable to create tensor, you should probably activate truncation and/or padding '
'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.' )
return inputs
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase="" , _lowerCAmelCase="pt" ):
_lowercase : Tuple = [0, 0, 0]
_lowercase : Optional[int] = [artist] * len(self.version )
_lowercase : str = [genres] * len(self.version )
_lowercase , _lowercase , _lowercase : Union[str, Any] = self.tokenize(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_lowercase , _lowercase , _lowercase : Optional[Any] = self._convert_token_to_id(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_lowercase : Dict = [-INFINITY] * len(full_tokens[-1] )
_lowercase : str = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=lowerCAmelCase_ )
for i in range(len(self.version ) )
]
return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks} )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowercase : Optional[Any] = os.path.join(
lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'] )
with open(lowerCAmelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=lowerCAmelCase_ ) )
_lowercase : Optional[Any] = os.path.join(
lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'] )
with open(lowerCAmelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=lowerCAmelCase_ ) )
_lowercase : Tuple = os.path.join(
lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'] )
with open(lowerCAmelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=lowerCAmelCase_ ) )
return (artists_file, genres_file, lyrics_file)
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : List[str] = self.artists_decoder.get(lowerCAmelCase_ )
_lowercase : List[str] = [self.genres_decoder.get(lowerCAmelCase_ ) for genre in genres_index]
_lowercase : List[Any] = [self.lyrics_decoder.get(lowerCAmelCase_ ) for character in lyric_index]
return artist, genres, lyrics
| 717
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=9_9 , _lowerCAmelCase=3_2 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=3_7 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , _lowerCAmelCase=1_0_0_0 , ):
_lowercase : List[str] = parent
_lowercase : Optional[Any] = batch_size
_lowercase : str = seq_length
_lowercase : Dict = is_training
_lowercase : Optional[int] = use_input_mask
_lowercase : List[Any] = use_token_type_ids
_lowercase : Union[str, Any] = use_labels
_lowercase : Optional[Any] = vocab_size
_lowercase : Optional[Any] = hidden_size
_lowercase : str = num_hidden_layers
_lowercase : Tuple = num_attention_heads
_lowercase : Optional[Any] = intermediate_size
_lowercase : Optional[Any] = hidden_act
_lowercase : Union[str, Any] = hidden_dropout_prob
_lowercase : Union[str, Any] = attention_probs_dropout_prob
_lowercase : int = max_position_embeddings
_lowercase : str = type_vocab_size
_lowercase : Tuple = type_sequence_label_size
_lowercase : Dict = initializer_range
_lowercase : List[Any] = num_labels
_lowercase : List[str] = num_choices
_lowercase : Dict = scope
_lowercase : List[Any] = range_bbox
def __a ( self ):
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_lowercase : List[str] = bbox[i, j, 3]
_lowercase : Optional[int] = bbox[i, j, 1]
_lowercase : int = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_lowercase : Dict = bbox[i, j, 2]
_lowercase : Dict = bbox[i, j, 0]
_lowercase : int = t
_lowercase : Union[str, Any] = tf.convert_to_tensor(_lowerCAmelCase )
_lowercase : Any = None
if self.use_input_mask:
_lowercase : int = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase : Tuple = None
if self.use_token_type_ids:
_lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowercase : Tuple = None
_lowercase : Union[str, Any] = None
_lowercase : List[str] = None
if self.use_labels:
_lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowercase : str = ids_tensor([self.batch_size] , self.num_choices )
_lowercase : Any = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[Any] = TFLayoutLMModel(config=_lowerCAmelCase )
_lowercase : List[Any] = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
_lowercase : List[Any] = model(_lowerCAmelCase , _lowerCAmelCase , token_type_ids=_lowerCAmelCase )
_lowercase : List[str] = model(_lowerCAmelCase , _lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[Any] = TFLayoutLMForMaskedLM(config=_lowerCAmelCase )
_lowercase : Any = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : str = self.num_labels
_lowercase : Tuple = TFLayoutLMForSequenceClassification(config=_lowerCAmelCase )
_lowercase : int = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Any = self.num_labels
_lowercase : Optional[int] = TFLayoutLMForTokenClassification(config=_lowerCAmelCase )
_lowercase : Union[str, Any] = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Union[str, Any] = TFLayoutLMForQuestionAnswering(config=_lowerCAmelCase )
_lowercase : str = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self ):
_lowercase : Union[str, Any] = self.prepare_config_and_inputs()
(
(
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) ,
) : List[Any] = config_and_inputs
_lowercase : Optional[Any] = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( __snake_case , __snake_case , unittest.TestCase ):
_UpperCamelCase : Optional[int] = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
_UpperCamelCase : Union[str, Any] = (
{
"feature-extraction": TFLayoutLMModel,
"fill-mask": TFLayoutLMForMaskedLM,
"text-classification": TFLayoutLMForSequenceClassification,
"token-classification": TFLayoutLMForTokenClassification,
"zero-shot": TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
_UpperCamelCase : str = False
_UpperCamelCase : List[str] = True
_UpperCamelCase : Tuple = 10
def __a ( self ):
_lowercase : Optional[int] = TFLayoutLMModelTester(self )
_lowercase : str = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=3_7 )
def __a ( self ):
self.config_tester.run_common_tests()
def __a ( self ):
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowerCAmelCase )
def __a ( self ):
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowerCAmelCase )
@slow
def __a ( self ):
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : List[Any] = TFLayoutLMModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@unittest.skip('Onnx compliancy broke with TF 2.10' )
def __a ( self ):
pass
def __magic_name__ ( ) -> Optional[int]:
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
# fmt: off
_lowercase : Optional[Any] = tf.convert_to_tensor([[101,1_019,1_014,1_016,1_037,12_849,4_747,1_004,14_246,2_278,5_439,4_524,5_002,2_930,2_193,2_930,4_341,3_208,1_005,1_055,2_171,2_848,11_300,3_531,102],[101,4_070,4_034,7_020,1_024,3_058,1_015,1_013,2_861,1_013,6_070,19_274,2_772,6_205,27_814,16_147,16_147,4_343,2_047,10_283,10_969,14_389,1_012,2_338,102]] ) # noqa: E231
_lowercase : Tuple = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
_lowercase : Optional[int] = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1_000,1_000,1_000,1_000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1_000,1_000,1_000,1_000]]] ) # noqa: E231
_lowercase : int = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
_lowercase : Union[str, Any] = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def __a ( self ):
_lowercase : Tuple = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' )
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Optional[int] = prepare_layoutlm_batch_inputs()
# forward pass
_lowercase : Tuple = model(input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
# test the sequence output on [0, :3, :3]
_lowercase : Optional[Any] = tf.convert_to_tensor(
[[0.17_85, -0.19_47, -0.04_25], [-0.32_54, -0.28_07, 0.25_53], [-0.53_91, -0.33_22, 0.33_64]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _lowerCAmelCase , atol=1E-3 ) )
# test the pooled output on [1, :3]
_lowercase : Optional[int] = tf.convert_to_tensor([-0.65_80, -0.02_14, 0.85_52] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , _lowerCAmelCase , atol=1E-3 ) )
@slow
def __a ( self ):
# initialize model with randomly initialized sequence classification head
_lowercase : Optional[Any] = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2 )
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Optional[Any] = prepare_layoutlm_batch_inputs()
# forward pass
_lowercase : Any = model(
input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
_lowercase : List[Any] = outputs.loss
_lowercase : Any = (2,)
self.assertEqual(loss.shape , _lowerCAmelCase )
# test the shape of the logits
_lowercase : str = outputs.logits
_lowercase : Dict = (2, 2)
self.assertEqual(logits.shape , _lowerCAmelCase )
@slow
def __a ( self ):
# initialize model with randomly initialized token classification head
_lowercase : Dict = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=1_3 )
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase : str = prepare_layoutlm_batch_inputs()
# forward pass
_lowercase : Dict = model(
input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
# test the shape of the logits
_lowercase : Dict = outputs.logits
_lowercase : Optional[Any] = tf.convert_to_tensor((2, 2_5, 1_3) )
self.assertEqual(logits.shape , _lowerCAmelCase )
@slow
def __a ( self ):
# initialize model with randomly initialized token classification head
_lowercase : Union[str, Any] = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' )
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase : List[Any] = prepare_layoutlm_batch_inputs()
# forward pass
_lowercase : int = model(input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
# test the shape of the logits
_lowercase : Any = tf.convert_to_tensor((2, 2_5) )
self.assertEqual(outputs.start_logits.shape , _lowerCAmelCase )
self.assertEqual(outputs.end_logits.shape , _lowerCAmelCase )
| 677
| 0
|
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
__snake_case : Any ={
'<': operator.lt,
'<=': operator.le,
'==': operator.eq,
'!=': operator.ne,
'>=': operator.ge,
'>': operator.gt,
}
def lowerCAmelCase__ ( lowerCamelCase_ : Tuple ,lowerCamelCase_ : Union[str, Any] ,lowerCamelCase_ : int ,lowerCamelCase_ : str ,lowerCamelCase_ : Any ,lowerCamelCase_ : Optional[Any]):
'''simple docstring'''
if got_ver is None or want_ver is None:
raise ValueError(
f"""Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"""
f""" reinstalling {pkg}.""")
if not ops[op](version.parse(lowerCamelCase_) ,version.parse(lowerCamelCase_)):
raise ImportError(
f"""{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}""")
def lowerCAmelCase__ ( lowerCamelCase_ : str ,lowerCamelCase_ : Optional[str] = None):
'''simple docstring'''
lowerCAmelCase__ : Tuple = f"""\n{hint}""" if hint is not None else ''''''
# non-versioned check
if re.match(r'''^[\w_\-\d]+$''' ,lowerCamelCase_):
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = requirement, None, None
else:
lowerCAmelCase__ : Optional[int] = re.findall(r'''^([^!=<>\s]+)([\s!=<>]{1,2}.+)''' ,lowerCamelCase_)
if not match:
raise ValueError(
'''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but'''
f""" got {requirement}""")
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = match[0]
lowerCAmelCase__ : Tuple = want_full.split(''',''') # there could be multiple requirements
lowerCAmelCase__ : List[str] = {}
for w in want_range:
lowerCAmelCase__ : List[Any] = re.findall(r'''^([\s!=<>]{1,2})(.+)''' ,lowerCamelCase_)
if not match:
raise ValueError(
'''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,'''
f""" but got {requirement}""")
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = match[0]
lowerCAmelCase__ : Optional[int] = want_ver
if op not in ops:
raise ValueError(f"""{requirement}: need one of {list(ops.keys())}, but got {op}""")
# special case
if pkg == "python":
lowerCAmelCase__ : Dict = '''.'''.join([str(lowerCamelCase_) for x in sys.version_info[:3]])
for op, want_ver in wanted.items():
_compare_versions(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_)
return
# check if any version is installed
try:
lowerCAmelCase__ : Optional[int] = importlib.metadata.version(lowerCamelCase_)
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
f"""The '{requirement}' distribution was not found and is required by this application. {hint}""")
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_)
def lowerCAmelCase__ ( lowerCamelCase_ : str):
'''simple docstring'''
lowerCAmelCase__ : Any = '''Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'''
return require_version(lowerCamelCase_ ,lowerCamelCase_)
| 647
|
from collections.abc import Callable
class lowerCamelCase__ :
'''simple docstring'''
def __init__(self ,__lowerCamelCase = None ) -> None:
"""simple docstring"""
lowerCAmelCase__ : list = []
# Stores indexes of each item for supporting updates and deletion.
lowerCAmelCase__ : dict = {}
# Stores current size of heap.
lowerCAmelCase__ : Any = 0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
lowerCAmelCase__ : Union[str, Any] = key or (lambda __lowerCamelCase : x)
def lowerCAmelCase__ (self ,__lowerCamelCase ) -> int | None:
"""simple docstring"""
return int((i - 1) / 2 ) if i > 0 else None
def lowerCAmelCase__ (self ,__lowerCamelCase ) -> int | None:
"""simple docstring"""
lowerCAmelCase__ : Any = int(2 * i + 1 )
return left if 0 < left < self.size else None
def lowerCAmelCase__ (self ,__lowerCamelCase ) -> int | None:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = int(2 * i + 2 )
return right if 0 < right < self.size else None
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ) -> None:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : str = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.arr[j], self.arr[i]
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ) -> bool:
"""simple docstring"""
return self.arr[i][1] < self.arr[j][1]
def lowerCAmelCase__ (self ,__lowerCamelCase ) -> int:
"""simple docstring"""
lowerCAmelCase__ : Any = self._left(__lowerCamelCase )
lowerCAmelCase__ : List[str] = self._right(__lowerCamelCase )
lowerCAmelCase__ : List[Any] = i
if left is not None and not self._cmp(__lowerCamelCase ,__lowerCamelCase ):
lowerCAmelCase__ : Any = left
if right is not None and not self._cmp(__lowerCamelCase ,__lowerCamelCase ):
lowerCAmelCase__ : Union[str, Any] = right
return valid_parent
def lowerCAmelCase__ (self ,__lowerCamelCase ) -> None:
"""simple docstring"""
lowerCAmelCase__ : List[str] = self._parent(__lowerCamelCase )
while parent is not None and not self._cmp(__lowerCamelCase ,__lowerCamelCase ):
self._swap(__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = parent, self._parent(__lowerCamelCase )
def lowerCAmelCase__ (self ,__lowerCamelCase ) -> None:
"""simple docstring"""
lowerCAmelCase__ : Any = self._get_valid_parent(__lowerCamelCase )
while valid_parent != index:
self._swap(__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = valid_parent, self._get_valid_parent(__lowerCamelCase )
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ) -> None:
"""simple docstring"""
if item not in self.pos_map:
return
lowerCAmelCase__ : str = self.pos_map[item]
lowerCAmelCase__ : Tuple = [item, self.key(__lowerCamelCase )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(__lowerCamelCase )
self._heapify_down(__lowerCamelCase )
def lowerCAmelCase__ (self ,__lowerCamelCase ) -> None:
"""simple docstring"""
if item not in self.pos_map:
return
lowerCAmelCase__ : Optional[int] = self.pos_map[item]
del self.pos_map[item]
lowerCAmelCase__ : Dict = self.arr[self.size - 1]
lowerCAmelCase__ : Tuple = index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(__lowerCamelCase )
self._heapify_down(__lowerCamelCase )
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ) -> None:
"""simple docstring"""
lowerCAmelCase__ : Dict = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(__lowerCamelCase )] )
else:
lowerCAmelCase__ : List[str] = [item, self.key(__lowerCamelCase )]
lowerCAmelCase__ : List[str] = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def lowerCAmelCase__ (self ) -> tuple | None:
"""simple docstring"""
return self.arr[0] if self.size else None
def lowerCAmelCase__ (self ) -> tuple | None:
"""simple docstring"""
lowerCAmelCase__ : List[str] = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def lowerCAmelCase__ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 647
| 1
|
import os
def lowerCamelCase__ ( ):
'''simple docstring'''
snake_case_ = os.path.join(os.path.dirname(_A ) , "num.txt" )
with open(_A ) as file_hand:
return str(sum(int(_A ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 704
|
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = DistilBertTokenizer
lowerCAmelCase_ = DistilBertTokenizerFast
lowerCAmelCase_ = True
@slow
def snake_case__ ( self : Tuple ):
"""simple docstring"""
snake_case_ = DistilBertTokenizer.from_pretrained("distilbert-base-uncased" )
snake_case_ = tokenizer.encode("sequence builders" , add_special_tokens=__lowercase )
snake_case_ = tokenizer.encode("multi-sequence build" , add_special_tokens=__lowercase )
snake_case_ = tokenizer.build_inputs_with_special_tokens(__lowercase )
snake_case_ = tokenizer.build_inputs_with_special_tokens(__lowercase , __lowercase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 139
| 0
|
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__UpperCamelCase : Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name
__UpperCamelCase : Tuple = '''
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)["depth"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline("depth-estimation")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to("cuda")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
... )
>>> pipe = pipe.to("cuda")
>>> img = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/cat.png"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")
>>> prompt = "A robot, 4k photo"
>>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"
>>> generator = torch.Generator(device="cuda").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save("robot_cat.png")
```
'''
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any=8 ):
lowerCAmelCase = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
lowerCAmelCase = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class a ( a__ ):
def __init__( self , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
super().__init__()
self.register_modules(
unet=_snake_case , scheduler=_snake_case , movq=_snake_case , )
lowerCAmelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
if latents is None:
lowerCAmelCase = randn_tensor(_snake_case , generator=_snake_case , device=_snake_case , dtype=_snake_case )
else:
if latents.shape != shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
lowerCAmelCase = latents.to(_snake_case )
lowerCAmelCase = latents * scheduler.init_noise_sigma
return latents
def UpperCamelCase__ ( self , _snake_case=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
lowerCAmelCase = torch.device(F'cuda:{gpu_id}' )
lowerCAmelCase = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_snake_case , _snake_case )
def UpperCamelCase__ ( self , _snake_case=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
lowerCAmelCase = torch.device(F'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_snake_case )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowerCAmelCase = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowerCAmelCase ,lowerCAmelCase = cpu_offload_with_hook(_snake_case , _snake_case , prev_module_hook=_snake_case )
# We'll offload the last model manually.
lowerCAmelCase = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCamelCase__ ( self ):
"""simple docstring"""
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_snake_case , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_snake_case )
def __call__( self , _snake_case , _snake_case , _snake_case , _snake_case = 5_12 , _snake_case = 5_12 , _snake_case = 1_00 , _snake_case = 4.0 , _snake_case = 1 , _snake_case = None , _snake_case = None , _snake_case = "pil" , _snake_case = True , ):
"""simple docstring"""
lowerCAmelCase = self._execution_device
lowerCAmelCase = guidance_scale > 1.0
if isinstance(_snake_case , _snake_case ):
lowerCAmelCase = torch.cat(_snake_case , dim=0 )
if isinstance(_snake_case , _snake_case ):
lowerCAmelCase = torch.cat(_snake_case , dim=0 )
if isinstance(_snake_case , _snake_case ):
lowerCAmelCase = torch.cat(_snake_case , dim=0 )
lowerCAmelCase = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
lowerCAmelCase = image_embeds.repeat_interleave(_snake_case , dim=0 )
lowerCAmelCase = negative_image_embeds.repeat_interleave(_snake_case , dim=0 )
lowerCAmelCase = hint.repeat_interleave(_snake_case , dim=0 )
lowerCAmelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_snake_case )
lowerCAmelCase = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=_snake_case )
self.scheduler.set_timesteps(_snake_case , device=_snake_case )
lowerCAmelCase = self.scheduler.timesteps
lowerCAmelCase = self.movq.config.latent_channels
lowerCAmelCase ,lowerCAmelCase = downscale_height_and_width(_snake_case , _snake_case , self.movq_scale_factor )
# create initial latent
lowerCAmelCase = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , _snake_case , _snake_case , _snake_case , self.scheduler , )
for i, t in enumerate(self.progress_bar(_snake_case ) ):
# expand the latents if we are doing classifier free guidance
lowerCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCAmelCase = {'image_embeds': image_embeds, 'hint': hint}
lowerCAmelCase = self.unet(
sample=_snake_case , timestep=_snake_case , encoder_hidden_states=_snake_case , added_cond_kwargs=_snake_case , return_dict=_snake_case , )[0]
if do_classifier_free_guidance:
lowerCAmelCase ,lowerCAmelCase = noise_pred.split(latents.shape[1] , dim=1 )
lowerCAmelCase ,lowerCAmelCase = noise_pred.chunk(2 )
lowerCAmelCase ,lowerCAmelCase = variance_pred.chunk(2 )
lowerCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowerCAmelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowerCAmelCase ,lowerCAmelCase = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowerCAmelCase = self.scheduler.step(
_snake_case , _snake_case , _snake_case , generator=_snake_case , )[0]
# post-processing
lowerCAmelCase = self.movq.decode(_snake_case , force_not_quantize=_snake_case )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
lowerCAmelCase = image * 0.5 + 0.5
lowerCAmelCase = image.clamp(0 , 1 )
lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowerCAmelCase = self.numpy_to_pil(_snake_case )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_snake_case )
| 4
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : Tuple = {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''',
'''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''',
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''',
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''',
'''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'''
),
'''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class a ( a__ ):
snake_case__ = '''bert'''
def __init__( self , _snake_case=3_05_22 , _snake_case=7_68 , _snake_case=12 , _snake_case=12 , _snake_case=30_72 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=2 , _snake_case=0.02 , _snake_case=1E-12 , _snake_case=0 , _snake_case="absolute" , _snake_case=True , _snake_case=None , **_snake_case , ):
"""simple docstring"""
super().__init__(pad_token_id=_snake_case , **_snake_case )
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_act
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = position_embedding_type
lowerCAmelCase = use_cache
lowerCAmelCase = classifier_dropout
class a ( a__ ):
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
lowerCAmelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCAmelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 4
| 1
|
'''simple docstring'''
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowerCAmelCase_ ( a_ , a_ ):
@register_to_config
def __init__( self : Any, *,
_snake_case : int = 4, _snake_case : int = 768, _snake_case : int, _snake_case : Any, ):
'''simple docstring'''
super().__init__()
snake_case : int =nn.Parameter(torch.zeros(_snake_case ) )
# parameters for additional clip time embeddings
snake_case : Dict =nn.Linear(_snake_case, _snake_case )
snake_case : str =nn.Linear(_snake_case, _snake_case )
# parameters for encoder hidden states
snake_case : Union[str, Any] =clip_extra_context_tokens
snake_case : Optional[int] =nn.Linear(
_snake_case, self.clip_extra_context_tokens * cross_attention_dim )
snake_case : Dict =nn.Linear(_snake_case, _snake_case )
snake_case : Dict =nn.LayerNorm(_snake_case )
def __snake_case ( self : Tuple, *, _snake_case : Optional[int], _snake_case : Dict, _snake_case : List[str], _snake_case : Optional[int] ):
'''simple docstring'''
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
snake_case : Optional[Any] =image_embeddings.shape[0]
snake_case : Tuple =self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
snake_case : Optional[int] =classifier_free_guidance_embeddings.expand(
_snake_case, -1 )
snake_case : Tuple =torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
snake_case : Optional[int] =prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
snake_case : int =self.embedding_proj(_snake_case )
snake_case : Optional[int] =self.clip_image_embeddings_project_to_time_embeddings(_snake_case )
snake_case : int =time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
snake_case : str =self.clip_extra_context_tokens_proj(_snake_case )
snake_case : List[str] =clip_extra_context_tokens.reshape(_snake_case, -1, self.clip_extra_context_tokens )
snake_case : Tuple =clip_extra_context_tokens.permute(0, 2, 1 )
snake_case : Union[str, Any] =self.encoder_hidden_states_proj(_snake_case )
snake_case : Optional[int] =self.text_encoder_hidden_states_norm(_snake_case )
snake_case : Dict =torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
| 136
|
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class lowerCAmelCase_ ( a_ , a_ ):
__UpperCAmelCase = 'pixel_values'
__UpperCAmelCase = False
__UpperCAmelCase = TimmBackboneConfig
def __init__( self : Optional[Any], _snake_case : Any, **_snake_case : Tuple ):
'''simple docstring'''
requires_backends(self, '''timm''' )
super().__init__(_snake_case )
snake_case : Optional[Any] =config
if config.backbone is None:
raise ValueError('''backbone is not set in the config. Please set it to a timm model name.''' )
if config.backbone not in timm.list_models():
raise ValueError(f'''backbone {config.backbone} is not supported by timm.''' )
if hasattr(_snake_case, '''out_features''' ) and config.out_features is not None:
raise ValueError('''out_features is not supported by TimmBackbone. Please use out_indices instead.''' )
snake_case : Optional[Any] =getattr(_snake_case, '''use_pretrained_backbone''', _snake_case )
if pretrained is None:
raise ValueError('''use_pretrained_backbone is not set in the config. Please set it to True or False.''' )
# We just take the final layer by default. This matches the default for the transformers models.
snake_case : Union[str, Any] =config.out_indices if getattr(_snake_case, '''out_indices''', _snake_case ) is not None else (-1,)
snake_case : str =timm.create_model(
config.backbone, pretrained=_snake_case, features_only=config.features_only, in_chans=config.num_channels, out_indices=_snake_case, **_snake_case, )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
snake_case : Any =self._backbone.return_layers
snake_case : Tuple ={layer['''module''']: str(_snake_case ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(_snake_case )
@classmethod
def __snake_case ( cls : Dict, _snake_case : Any, *_snake_case : List[str], **_snake_case : Optional[int] ):
'''simple docstring'''
requires_backends(cls, ['''vision''', '''timm'''] )
from ...models.timm_backbone import TimmBackboneConfig
snake_case : List[str] =kwargs.pop('''config''', TimmBackboneConfig() )
snake_case : Tuple =kwargs.pop('''use_timm_backbone''', _snake_case )
if not use_timm:
raise ValueError('''use_timm_backbone must be True for timm backbones''' )
snake_case : int =kwargs.pop('''num_channels''', config.num_channels )
snake_case : Optional[int] =kwargs.pop('''features_only''', config.features_only )
snake_case : Optional[int] =kwargs.pop('''use_pretrained_backbone''', config.use_pretrained_backbone )
snake_case : Optional[int] =kwargs.pop('''out_indices''', config.out_indices )
snake_case : List[str] =TimmBackboneConfig(
backbone=_snake_case, num_channels=_snake_case, features_only=_snake_case, use_pretrained_backbone=_snake_case, out_indices=_snake_case, )
return super()._from_config(_snake_case, **_snake_case )
def __snake_case ( self : Any, _snake_case : Dict ):
'''simple docstring'''
pass
def __snake_case ( self : List[Any], _snake_case : Dict, _snake_case : Tuple=None, _snake_case : int=None, _snake_case : Dict=None, **_snake_case : Any ):
'''simple docstring'''
snake_case : Dict =return_dict if return_dict is not None else self.config.use_return_dict
snake_case : str =(
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
snake_case : Dict =output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError('''Cannot output attentions for timm backbones at the moment''' )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
snake_case : Union[str, Any] =self._all_layers
snake_case : List[str] =self._backbone(_snake_case, **_snake_case )
snake_case : Tuple =self._return_layers
snake_case : Tuple =tuple(hidden_states[i] for i in self.out_indices )
else:
snake_case : Optional[Any] =self._backbone(_snake_case, **_snake_case )
snake_case : List[str] =None
snake_case : List[Any] =tuple(_snake_case )
snake_case : Union[str, Any] =tuple(_snake_case ) if hidden_states is not None else None
if not return_dict:
snake_case : Optional[int] =(feature_maps,)
if output_hidden_states:
snake_case : Dict =output + (hidden_states,)
return output
return BackboneOutput(feature_maps=_snake_case, hidden_states=_snake_case, attentions=_snake_case )
| 136
| 1
|
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class A__ ( __snake_case , unittest.TestCase ):
_UpperCAmelCase :List[str] = MvpTokenizer
_UpperCAmelCase :Tuple = MvpTokenizerFast
_UpperCAmelCase :Tuple = True
_UpperCAmelCase :str = filter_roberta_detectors
def __UpperCamelCase( self ):
'''simple docstring'''
super().setUp()
UpperCamelCase : List[Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
UpperCamelCase : Tuple = dict(zip(A_ , range(len(A_ ) ) ) )
UpperCamelCase : Tuple = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
UpperCamelCase : int = {"unk_token": "<unk>"}
UpperCamelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCamelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(A_ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(A_ ) )
def __UpperCamelCase( self , **A_ ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **A_ )
def __UpperCamelCase( self , **A_ ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **A_ )
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
def __UpperCamelCase( self ):
'''simple docstring'''
return MvpTokenizer.from_pretrained("RUCAIBox/mvp" )
@cached_property
def __UpperCamelCase( self ):
'''simple docstring'''
return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp" )
@require_torch
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[int] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
UpperCamelCase : Tuple = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase : Optional[int] = tokenizer(A_ , max_length=len(A_ ) , padding=A_ , return_tensors="pt" )
self.assertIsInstance(A_ , A_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
UpperCamelCase : Any = batch.input_ids.tolist()[0]
self.assertListEqual(A_ , A_ )
# Test that special tokens are reset
@require_torch
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Dict = ["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase : List[str] = tokenizer(A_ , padding=A_ , return_tensors="pt" )
# check if input_ids are returned and no labels
self.assertIn("input_ids" , A_ )
self.assertIn("attention_mask" , A_ )
self.assertNotIn("labels" , A_ )
self.assertNotIn("decoder_attention_mask" , A_ )
@require_torch
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[int] = [
"Summary of the text.",
"Another summary.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase : List[str] = tokenizer(text_target=A_ , max_length=32 , padding="max_length" , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
@require_torch
def __UpperCamelCase( self ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase : Optional[Any] = tokenizer(
["I am a small frog" * 1024, "I am a small frog"] , padding=A_ , truncation=A_ , return_tensors="pt" )
self.assertIsInstance(A_ , A_ )
self.assertEqual(batch.input_ids.shape , (2, 1024) )
@require_torch
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Any = ["A long paragraph for summarization."]
UpperCamelCase : Union[str, Any] = [
"Summary of the text.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase : List[str] = tokenizer(A_ , text_target=A_ , return_tensors="pt" )
UpperCamelCase : Union[str, Any] = inputs["input_ids"]
UpperCamelCase : Tuple = inputs["labels"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def __UpperCamelCase( self ):
'''simple docstring'''
pass
def __UpperCamelCase( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCamelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
UpperCamelCase : int = self.tokenizer_class.from_pretrained(A_ , **A_ )
UpperCamelCase : int = "A, <mask> AllenNLP sentence."
UpperCamelCase : int = tokenizer_r.encode_plus(A_ , add_special_tokens=A_ , return_token_type_ids=A_ )
UpperCamelCase : Optional[Any] = tokenizer_p.encode_plus(A_ , add_special_tokens=A_ , return_token_type_ids=A_ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
UpperCamelCase : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
UpperCamelCase : Optional[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
A_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
A_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 629
|
from __future__ import annotations
import math
def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> float:
UpperCamelCase : Tuple = u
for i in range(1 , _lowerCAmelCase ):
UpperCamelCase : Any = temp * (u - i)
return temp
def A_ ( ) -> None:
UpperCamelCase : Union[str, Any] = int(input("enter the numbers of values: " ) )
UpperCamelCase : list[list[float]] = []
for _ in range(_lowerCAmelCase ):
y.append([] )
for i in range(_lowerCAmelCase ):
for j in range(_lowerCAmelCase ):
y[i].append(_lowerCAmelCase )
UpperCamelCase : List[str] = 0
print("enter the values of parameters in a list: " )
UpperCamelCase : Optional[Any] = list(map(_lowerCAmelCase , input().split() ) )
print("enter the values of corresponding parameters: " )
for i in range(_lowerCAmelCase ):
UpperCamelCase : Tuple = float(input() )
UpperCamelCase : Any = int(input("enter the value to interpolate: " ) )
UpperCamelCase : List[Any] = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , _lowerCAmelCase ):
for j in range(n - i ):
UpperCamelCase : Optional[Any] = y[j + 1][i - 1] - y[j][i - 1]
UpperCamelCase : List[str] = y[0][0]
for i in range(1 , _lowerCAmelCase ):
summ += (ucal(_lowerCAmelCase , _lowerCAmelCase ) * y[0][i]) / math.factorial(_lowerCAmelCase )
print(F"""the value at {value} is {summ}""" )
if __name__ == "__main__":
main()
| 629
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
UpperCamelCase__ = '''
Human: <<task>>
Assistant: '''
UpperCamelCase__ = '''huggingface-tools/default-prompts'''
UpperCamelCase__ = {'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''}
def UpperCAmelCase__ ( _A , _A , _A="run" ):
"""simple docstring"""
if prompt_or_repo_id is None:
a_ = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search('''\\s''' , _A ) is not None:
return prompt_or_repo_id
a_ = cached_file(
_A , PROMPT_FILES[mode] , repo_type='''dataset''' , user_agent={'''agent''': agent_name} )
with open(_A , '''r''' , encoding='''utf-8''' ) as f:
return f.read()
| 143
|
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class __lowercase ( a__ ):
_lowerCAmelCase = ""
_lowerCAmelCase = "hf-legacy" # "hf://"" is reserved for hffs
def __init__( self : int , lowercase__ : Optional[DatasetInfo] = None , lowercase__ : Optional[str] = None , **lowercase__ : List[Any] , ):
super().__init__(self , **lowercase__ )
a_ = repo_info
a_ = token
a_ = None
def __magic_name__ ( self : List[str] ):
if self.dir_cache is None:
a_ = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
a_ = {
'''name''': hf_file.rfilename,
'''size''': None,
'''type''': '''file''',
}
self.dir_cache.update(
{
str(lowercase__ ): {'''name''': str(lowercase__ ), '''size''': None, '''type''': '''directory'''}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def __magic_name__ ( self : Dict , lowercase__ : str , lowercase__ : str = "rb" , **lowercase__ : Dict , ):
if not isinstance(self.repo_info , lowercase__ ):
raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}" )
a_ = hf_hub_url(self.repo_info.id , lowercase__ , revision=self.repo_info.sha )
return fsspec.open(
lowercase__ , mode=lowercase__ , headers=get_authentication_headers_for_url(lowercase__ , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open()
def __magic_name__ ( self : Optional[Any] , lowercase__ : Dict , **lowercase__ : Optional[int] ):
self._get_dirs()
a_ = self._strip_protocol(lowercase__ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(lowercase__ )
def __magic_name__ ( self : Dict , lowercase__ : Dict , lowercase__ : List[Any]=False , **lowercase__ : Tuple ):
self._get_dirs()
a_ = PurePosixPath(path.strip('''/''' ) )
a_ = {}
for p, f in self.dir_cache.items():
a_ = PurePosixPath(p.strip('''/''' ) )
a_ = p.parent
if root == path:
a_ = f
a_ = list(paths.values() )
if detail:
return out
else:
return sorted(f['''name'''] for f in out )
| 143
| 1
|
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class _snake_case ( UpperCAmelCase_ ):
def __init__( self):
'''simple docstring'''
lowercase__ : List[Any] = []
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_init_end""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_train_begin""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_train_end""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_epoch_begin""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_epoch_end""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_step_begin""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_step_end""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_evaluate""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_predict""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_save""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_log""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.events.append("""on_prediction_step""")
@require_torch
class _snake_case ( unittest.TestCase ):
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Dict = tempfile.mkdtemp()
def lowercase__ ( self):
'''simple docstring'''
shutil.rmtree(self.output_dir)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Any = RegressionDataset(length=SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[int] = RegressionDataset(length=SCREAMING_SNAKE_CASE_)
lowercase__ : Dict = RegressionModelConfig(a=SCREAMING_SNAKE_CASE_ , b=SCREAMING_SNAKE_CASE_)
lowercase__ : Any = RegressionPreTrainedModel(SCREAMING_SNAKE_CASE_)
lowercase__ : Any = TrainingArguments(self.output_dir , disable_tqdm=SCREAMING_SNAKE_CASE_ , report_to=[] , **SCREAMING_SNAKE_CASE_)
return Trainer(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , train_dataset=SCREAMING_SNAKE_CASE_ , eval_dataset=SCREAMING_SNAKE_CASE_ , callbacks=SCREAMING_SNAKE_CASE_ , )
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
self.assertEqual(len(SCREAMING_SNAKE_CASE_) , len(SCREAMING_SNAKE_CASE_))
# Order doesn't matter
lowercase__ : str = sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_: cb.__name__ if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) else cb.__class__.__name__)
lowercase__ : Tuple = sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_: cb.__name__ if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) else cb.__class__.__name__)
for cba, cba in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) and isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) and not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
self.assertEqual(SCREAMING_SNAKE_CASE_ , cba.__class__)
elif not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) and isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
self.assertEqual(cba.__class__ , SCREAMING_SNAKE_CASE_)
else:
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : int = ["""on_init_end""", """on_train_begin"""]
lowercase__ : Union[str, Any] = 0
lowercase__ : Union[str, Any] = len(trainer.get_eval_dataloader())
lowercase__ : Dict = ["""on_prediction_step"""] * len(trainer.get_eval_dataloader()) + ["""on_log""", """on_evaluate"""]
for _ in range(trainer.state.num_train_epochs):
expected_events.append("""on_epoch_begin""")
for _ in range(SCREAMING_SNAKE_CASE_):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("""on_log""")
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("""on_save""")
expected_events.append("""on_epoch_end""")
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : int = self.get_trainer()
lowercase__ : Union[str, Any] = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_)
# Callbacks passed at init are added to the default callbacks
lowercase__ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback])
expected_callbacks.append(SCREAMING_SNAKE_CASE_)
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_)
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
lowercase__ : Any = self.get_trainer(disable_tqdm=SCREAMING_SNAKE_CASE_)
lowercase__ : Tuple = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Any = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
lowercase__ : Tuple = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(SCREAMING_SNAKE_CASE_)
expected_callbacks.remove(SCREAMING_SNAKE_CASE_)
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[int] = self.get_trainer()
lowercase__ : List[Any] = trainer.pop_callback(SCREAMING_SNAKE_CASE_)
self.assertEqual(cb.__class__ , SCREAMING_SNAKE_CASE_)
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_)
trainer.add_callback(SCREAMING_SNAKE_CASE_)
expected_callbacks.insert(0 , SCREAMING_SNAKE_CASE_)
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_)
# We can also add, pop, or remove by instance
lowercase__ : Union[str, Any] = self.get_trainer()
lowercase__ : Optional[Any] = trainer.callback_handler.callbacks[0]
trainer.remove_callback(SCREAMING_SNAKE_CASE_)
expected_callbacks.remove(SCREAMING_SNAKE_CASE_)
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_)
lowercase__ : str = self.get_trainer()
lowercase__ : Optional[Any] = trainer.callback_handler.callbacks[0]
lowercase__ : Union[str, Any] = trainer.pop_callback(SCREAMING_SNAKE_CASE_)
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_)
trainer.add_callback(SCREAMING_SNAKE_CASE_)
expected_callbacks.insert(0 , SCREAMING_SNAKE_CASE_)
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action="""ignore""" , category=SCREAMING_SNAKE_CASE_)
lowercase__ : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback])
trainer.train()
lowercase__ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_))
# Independent log/save/eval
lowercase__ : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5)
trainer.train()
lowercase__ : List[str] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_))
lowercase__ : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5)
trainer.train()
lowercase__ : Dict = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_))
lowercase__ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="""steps""")
trainer.train()
lowercase__ : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_))
lowercase__ : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="""epoch""")
trainer.train()
lowercase__ : Optional[int] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_))
# A bit of everything
lowercase__ : Any = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="""steps""" , )
trainer.train()
lowercase__ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE_ , self.get_expected_events(SCREAMING_SNAKE_CASE_))
# warning should be emitted for duplicated callbacks
with patch("""transformers.trainer_callback.logger.warning""") as warn_mock:
lowercase__ : Dict = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(SCREAMING_SNAKE_CASE_) in warn_mock.call_args[0][0]
| 12
|
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
_a = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
_a = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case, __snake_case ) -> Tuple:
"""simple docstring"""
for attribute in key.split('''.''' ):
_UpperCamelCase = getattr(__snake_case, __snake_case )
if weight_type is not None:
_UpperCamelCase = getattr(__snake_case, __snake_case ).shape
else:
_UpperCamelCase = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
_UpperCamelCase = value
elif weight_type == "weight_g":
_UpperCamelCase = value
elif weight_type == "weight_v":
_UpperCamelCase = value
elif weight_type == "bias":
_UpperCamelCase = value
else:
_UpperCamelCase = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def lowerCamelCase__ ( __snake_case, __snake_case ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = []
_UpperCamelCase = fairseq_model.state_dict()
_UpperCamelCase = hf_model.feature_extractor
_UpperCamelCase = hf_model.adapter
for name, value in fairseq_dict.items():
_UpperCamelCase = False
if "conv_layers" in name:
load_conv_layer(
__snake_case, __snake_case, __snake_case, __snake_case, hf_model.config.feat_extract_norm == '''group''', )
_UpperCamelCase = True
elif any(x in name for x in ['''adaptor''', '''w2v_encoder.proj.''', '''w2v_proj_ln.'''] ):
load_adapter(__snake_case, __snake_case, __snake_case, __snake_case )
_UpperCamelCase = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
_UpperCamelCase = True
if "*" in mapped_key:
_UpperCamelCase = name.split(__snake_case )[0].split('''.''' )[-2]
_UpperCamelCase = mapped_key.replace('''*''', __snake_case )
if "weight_g" in name:
_UpperCamelCase = '''weight_g'''
elif "weight_v" in name:
_UpperCamelCase = '''weight_v'''
elif "bias" in name:
_UpperCamelCase = '''bias'''
elif "weight" in name:
_UpperCamelCase = '''weight'''
else:
_UpperCamelCase = None
set_recursively(__snake_case, __snake_case, __snake_case, __snake_case, __snake_case )
continue
if not is_used:
unused_weights.append(__snake_case )
logger.warning(F'''Unused weights: {unused_weights}''' )
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case, __snake_case ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = full_name.split('''conv_layers.''' )[-1]
_UpperCamelCase = name.split('''.''' )
_UpperCamelCase = int(items[0] )
_UpperCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
_UpperCamelCase = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
_UpperCamelCase = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
_UpperCamelCase = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
_UpperCamelCase = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__snake_case )
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case ) -> Dict:
"""simple docstring"""
_UpperCamelCase = full_name.split('''adaptor.''' )[-1]
_UpperCamelCase = name.split('''.''' )
if items[1].isdigit():
_UpperCamelCase = int(items[1] )
else:
_UpperCamelCase = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'''
_UpperCamelCase = value
logger.info(F'''Adapter proj layer norm bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'''
_UpperCamelCase = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'''
_UpperCamelCase = value
logger.info(F'''Adapter proj layer bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'''
_UpperCamelCase = value
logger.info(F'''Adapter proj layer weight was initialized from {full_name}.''' )
elif isinstance(__snake_case, __snake_case ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'''
_UpperCamelCase = value
logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'''
_UpperCamelCase = value
logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
else:
unused_weights.append(__snake_case )
def lowerCamelCase__ ( __snake_case ) -> str:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = emb.weight.shape
_UpperCamelCase = nn.Linear(__snake_case, __snake_case, bias=__snake_case )
_UpperCamelCase = emb.weight.data
return lin_layer
@torch.no_grad()
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case, __snake_case, __snake_case, __snake_case, __snake_case, __snake_case, __snake_case, __snake_case, ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = WavaVecaConfig.from_pretrained(
__snake_case, add_adapter=__snake_case, adapter_stride=__snake_case, adapter_kernel_size=__snake_case, use_auth_token=__snake_case, output_hidden_size=__snake_case, )
_UpperCamelCase = MBartConfig.from_pretrained(__snake_case )
# load model
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={
'''config_yaml''': config_yaml_path,
'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ),
'''w2v_path''': checkpoint_path,
'''load_pretrained_decoder_from''': None,
}, )
_UpperCamelCase = model[0].eval()
# load feature extractor
_UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(__snake_case, use_auth_token=__snake_case )
# set weights for wav2vec2 encoder
_UpperCamelCase = WavaVecaModel(__snake_case )
recursively_load_weights_wavaveca(model.encoder, __snake_case )
# load decoder weights
_UpperCamelCase = MBartForCausalLM(__snake_case )
_UpperCamelCase , _UpperCamelCase = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=__snake_case )
logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
_UpperCamelCase = SpeechEncoderDecoderModel(encoder=__snake_case, decoder=__snake_case )
_UpperCamelCase = False
_UpperCamelCase = MBartaaTokenizer(__snake_case )
tokenizer.save_pretrained(__snake_case )
_UpperCamelCase = hf_wavavec.config.to_dict()
_UpperCamelCase = tokenizer.pad_token_id
_UpperCamelCase = tokenizer.bos_token_id
_UpperCamelCase = tokenizer.eos_token_id
_UpperCamelCase = '''mbart50'''
_UpperCamelCase = '''wav2vec2'''
_UpperCamelCase = tokenizer.eos_token_id
_UpperCamelCase = 25_00_04
_UpperCamelCase = tokenizer.eos_token_id
_UpperCamelCase = SpeechEncoderDecoderConfig.from_dict(__snake_case )
hf_wavavec.save_pretrained(__snake_case )
feature_extractor.save_pretrained(__snake_case )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-xls-r-1b""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/mbart-large-50-one-to-many-mmt""",
type=str,
help="""Path to hf decoder checkpoint config""",
)
parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""")
parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""")
parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""")
parser.add_argument("""--encoder_output_dim""", default=1024, type=int, help="""encoder output dim""")
parser.add_argument("""--start_token_id""", default=25_0004, type=int, help="""`decoder_start_token_id` of model config""")
_a = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 19
| 0
|
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
| 705
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase : str = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
__UpperCamelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 641
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.