import numpy as np
import skfuzzy as fuzz

if __name__ == "__main__":
    # Create the universe of discourse using linspace().
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc.).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using built-in functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement(A) = 1 - µA(x)
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), 1 - µB(x))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged))[1]
    # 5. Algebraic Sum = µA(x) + µB(x) - µA(x) * µB(x)
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = µA(x) * µB(x)
    alg_product = young * middle_aged
    # 7. Bounded Sum = min(1, µA(x) + µB(x))
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded Difference = max(0, µA(x) - µB(x))
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
    # max-min composition
    # max-product composition
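    # A minimal numpy sketch of both compositions, assuming two fuzzy
    # relations built from the sets above via an outer product (the relation
    # names are illustrative only, not part of the original exercise).
    relation_ab = np.minimum.outer(young, middle_aged)  # R1 over X x X
    relation_bc = np.minimum.outer(middle_aged, young)  # R2 over X x X
    # max-min composition: T[i, k] = max_j min(R1[i, j], R2[j, k])
    maxmin_composition = np.max(
        np.minimum(relation_ab[:, :, None], relation_bc[None, :, :]), axis=1
    )
    # max-product composition: T[i, k] = max_j R1[i, j] * R2[j, k]
    maxprod_composition = np.max(
        relation_ab[:, :, None] * relation_bc[None, :, :], axis=1
    )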
    # Plot each set and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title("Young")
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title("Middle aged")
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title("union")
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title("intersection")
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title("complement_a")
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title("difference a/b")
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title("alg_sum")
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title("alg_product")
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title("bdd_sum")
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title("bdd_difference")
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
import gc
import math
import unittest

import torch

from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


logger = logging.get_logger(__name__)

enable_full_determinism()


class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict


class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)

        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        # by default model loading will use accelerate as `low_cpu_mem_usage=True`
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1,
            model_accelerate.config.in_channels,
            model_accelerate.config.sample_size,
            model_accelerate.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)["sample"]

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False
        )
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["sample"]

        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)

    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))


class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)

        assert image is not None, "Make sure output is not None"

    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (256, 256)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
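# A typical invocation for just this module (assuming the diffusers repo
# layout, where this test file lives under tests/models/) would be:
#   python -m pytest tests/models/test_models_unet_2d.py -q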
"""simple docstring"""
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
__A = pytest.mark.integration
__A = {"comet"}
__A = importlib.util.find_spec("fairseq") is not None
__A = {"code_eval"}
__A = os.name == "nt"
__A = {"bertscore", "frugalscore", "perplexity"}
__A = importlib.util.find_spec("transformers") is not None
def UpperCamelCase__ ( lowercase__ : Tuple ):
@wraps(__lowerCamelCase )
def wrapper(self : Dict , lowercase__ : Tuple ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest("\"test requires Fairseq\"" )
else:
test_case(self , __lowerCamelCase )
return wrapper
def UpperCamelCase__ ( lowercase__ : Dict ):
@wraps(__lowerCamelCase )
def wrapper(self : Optional[int] , lowercase__ : Optional[int] ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest("\"test requires transformers\"" )
else:
test_case(self , __lowerCamelCase )
return wrapper
def UpperCamelCase__ ( lowercase__ : List[Any] ):
@wraps(__lowerCamelCase )
def wrapper(self : List[Any] , lowercase__ : List[str] ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest("\"test not supported on Windows\"" )
else:
test_case(self , __lowerCamelCase )
return wrapper
def UpperCamelCase__ ( ):
snake_case : Tuple = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("./metrics/*/" )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@local
class lowerCamelCase__ ( parameterized.TestCase ):
a__ : Optional[Any] = {}
a__ : Union[str, Any] = None
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning" )
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
snake_case : Optional[int] = "[...]"
snake_case : int = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , _A ) ).module_path )
snake_case : Optional[Any] = datasets.load.import_main_class(metric_module.__name__ , dataset=_A )
# check parameters
snake_case : Dict = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(_A , metric_module.__name__ ):
with self.use_local_metrics():
try:
snake_case : List[str] = doctest.testmod(_A , verbose=_A , raise_on_error=_A )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
snake_case : List[Any] = "[...]"
snake_case : Tuple = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , _A ) ).module_path )
# run doctest
with self.use_local_metrics():
snake_case : int = doctest.testmod(_A , verbose=_A , raise_on_error=_A )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@contextmanager
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](_A ):
yield
else:
yield
@contextmanager
def lowerCamelCase_ ( self ):
"""simple docstring"""
def load_local_metric(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
return load_metric(os.path.join("metrics" , _A ) , *_A , **_A )
with patch("datasets.load_metric" ) as mock_load_metric:
snake_case : Union[str, Any] = load_local_metric
yield
@classmethod
def lowerCamelCase_ ( cls , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def wrapper(SCREAMING_SNAKE_CASE ):
snake_case : Optional[Any] = contextmanager(_A )
snake_case : Union[str, Any] = patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt" )
def UpperCamelCase__ ( lowercase__ : Union[str, Any] ):
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("sv" , "" , "" ) # handle pytest cli flags
class lowerCamelCase__ ( __SCREAMING_SNAKE_CASE ):
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
assert len(input_dict["input_ids"] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("bleurt.score._create_predictor" ) as mock_create_predictor:
snake_case : Optional[int] = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore" )
def UpperCamelCase__ ( lowercase__ : Union[str, Any] ):
import torch
def bert_cos_score_idf(lowercase__ : Dict , lowercase__ : List[Any] , *lowercase__ : int , **lowercase__ : Dict ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(__lowerCamelCase ) )
# mock get_model which is supposed to do download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch("bert_score.scorer.get_model" ), patch(
"bert_score.scorer.bert_cos_score_idf" ) as mock_bert_cos_score_idf:
snake_case : Any = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("comet" )
def UpperCamelCase__ ( lowercase__ : List[Any] ):
def load_from_checkpoint(lowercase__ : Dict ):
class lowerCamelCase__ :
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
assert len(_A ) == 2
snake_case : Any = [0.19, 0.92]
return scores, sum(_A ) / len(_A )
return Model()
# mock load_from_checkpoint which is supposed to do download a bert model
# mock load_from_checkpoint which is supposed to do download a bert model
with patch("comet.download_model" ) as mock_download_model:
snake_case : Optional[Any] = None
with patch("comet.load_from_checkpoint" ) as mock_load_from_checkpoint:
snake_case : Optional[Any] = load_from_checkpoint
yield
def UpperCamelCase__ ( ):
snake_case : Optional[int] = load_metric(os.path.join("metrics" , "seqeval" ) )
snake_case : Optional[int] = "ERROR"
snake_case : List[Any] = F'''Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}'''
with pytest.raises(__lowerCamelCase , match=re.escape(__lowerCamelCase ) ):
metric.compute(predictions=[] , references=[] , scheme=__lowerCamelCase )
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
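# A compact cross-check of the same digit sum, relying only on Python's
# built-in big integers and string conversion:
#   assert solution(1000) == sum(int(digit) for digit in str(2**1000))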
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}


class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
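# A quick sketch of the attribute_map aliasing defined above (hypothetical
# usage; the trajectory_transformer module is deprecated in recent
# transformers releases):
#   config = TrajectoryTransformerConfig()
#   config.hidden_size        # resolves to config.n_embd == 128
#   config.num_hidden_layers  # resolves to config.n_layer == 4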
from ..utils import DummyObject, requires_backends


class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
from typing import Dict, List, Optional, Union

import numpy as np

from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy


logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    """General feature extraction class for speech recognition inputs."""

    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)

    def pad(
        self,
        processed_features,
        padding=True,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
    ) -> BatchFeature:
        """Pad (and optionally truncate) a batch of input values up to a common length."""
        # If we have a list of dicts, convert it into a dict of lists.
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)

    def _pad(
        self,
        processed_features,
        max_length=None,
        padding_strategy=PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of=None,
        return_attention_mask=None,
    ) -> dict:
        """Pad a single example (a dict of numpy arrays) according to the strategy."""
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features

    def _truncate(
        self,
        processed_features,
        max_length=None,
        pad_to_multiple_of=None,
        truncation=None,
    ):
        """Truncate a single example (a dict of numpy arrays) to `max_length`."""
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        """Resolve the user-supplied `padding` argument into a PaddingStrategy."""
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
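# A minimal usage sketch of the padding API above (hypothetical: assumes a
# concrete subclass such as transformers' Wav2Vec2FeatureExtractor, which
# inherits this `pad` implementation):
#   from transformers import Wav2Vec2FeatureExtractor
#   extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
#   features = {"input_values": [[0.1, 0.2, 0.3], [0.4, 0.5]]}
#   batch = extractor.pad(features, padding=True, return_tensors="np", return_attention_mask=True)
#   # batch["input_values"].shape == (2, 3); batch["attention_mask"] marks the real frames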
import inspect
import unittest

from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
"""simple docstring"""
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
__SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
enable_full_determinism()
class __A (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase):
'''simple docstring'''
__lowercase: str = UNetaDModel
__lowercase: Any = """sample"""
@property
def lowerCAmelCase ( self : str ) ->Optional[int]:
"""simple docstring"""
snake_case_ = 4
snake_case_ = 3
snake_case_ = (32, 32)
snake_case_ = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
snake_case_ = torch.tensor([10] ).to(_A )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase ( self : List[Any] ) ->List[str]:
"""simple docstring"""
return (3, 32, 32)
@property
def lowerCAmelCase ( self : Optional[int] ) ->Tuple:
"""simple docstring"""
return (3, 32, 32)
def lowerCAmelCase ( self : Optional[int] ) ->Optional[int]:
"""simple docstring"""
snake_case_ = {
"""block_out_channels""": (32, 64),
"""down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""),
"""up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""),
"""attention_head_dim""": 3,
"""out_channels""": 3,
"""in_channels""": 3,
"""layers_per_block""": 2,
"""sample_size""": 32,
}
snake_case_ = self.dummy_input
return init_dict, inputs_dict
class __A (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase):
'''simple docstring'''
__lowercase: Optional[Any] = UNetaDModel
__lowercase: List[str] = """sample"""
@property
def lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = 4
snake_case_ = 4
snake_case_ = (32, 32)
snake_case_ = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
snake_case_ = torch.tensor([10] ).to(_A )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase ( self : Optional[int] ) ->Tuple:
"""simple docstring"""
return (4, 32, 32)
@property
def lowerCAmelCase ( self : List[Any] ) ->Tuple:
"""simple docstring"""
return (4, 32, 32)
def lowerCAmelCase ( self : Optional[int] ) ->Tuple:
"""simple docstring"""
snake_case_ = {
"""sample_size""": 32,
"""in_channels""": 4,
"""out_channels""": 4,
"""layers_per_block""": 2,
"""block_out_channels""": (32, 64),
"""attention_head_dim""": 32,
"""down_block_types""": ("""DownBlock2D""", """DownBlock2D"""),
"""up_block_types""": ("""UpBlock2D""", """UpBlock2D"""),
}
snake_case_ = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase ( self : int ) ->Tuple:
"""simple docstring"""
snake_case_ , snake_case_ = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(_A )
snake_case_ = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def lowerCAmelCase ( self : Union[str, Any] ) ->Dict:
"""simple docstring"""
snake_case_ , snake_case_ = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=_A )
model.to(_A )
snake_case_ = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def lowerCAmelCase ( self : Tuple ) ->Dict:
"""simple docstring"""
snake_case_ , snake_case_ = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=_A )
model_accelerate.to(_A )
model_accelerate.eval()
snake_case_ = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
snake_case_ = noise.to(_A )
snake_case_ = torch.tensor([10] * noise.shape[0] ).to(_A )
snake_case_ = model_accelerate(_A , _A )["""sample"""]
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
snake_case_ , snake_case_ = UNetaDModel.from_pretrained(
"""fusing/unet-ldm-dummy-update""" , output_loading_info=_A , low_cpu_mem_usage=_A )
model_normal_load.to(_A )
model_normal_load.eval()
snake_case_ = model_normal_load(_A , _A )["""sample"""]
assert torch_all_close(_A , _A , rtol=1E-3 )
def lowerCAmelCase ( self : Dict ) ->List[str]:
"""simple docstring"""
snake_case_ = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" )
model.eval()
model.to(_A )
snake_case_ = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
snake_case_ = noise.to(_A )
snake_case_ = torch.tensor([10] * noise.shape[0] ).to(_A )
with torch.no_grad():
snake_case_ = model(_A , _A ).sample
snake_case_ = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
snake_case_ = torch.tensor([-13.3_258, -20.1_100, -15.9_873, -17.6_617, -23.0_596, -17.9_419, -13.3_675, -16.1_889, -12.3_800] )
# fmt: on
self.assertTrue(torch_all_close(_A , _A , rtol=1E-3 ) )
class __A (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase):
'''simple docstring'''
__lowercase: Union[str, Any] = UNetaDModel
__lowercase: Optional[Any] = """sample"""
@property
def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : str=(32, 32) ) ->int:
"""simple docstring"""
snake_case_ = 4
snake_case_ = 3
snake_case_ = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
snake_case_ = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=_A )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase ( self : List[str] ) ->List[str]:
"""simple docstring"""
return (3, 32, 32)
@property
def lowerCAmelCase ( self : Any ) ->List[Any]:
"""simple docstring"""
return (3, 32, 32)
def lowerCAmelCase ( self : int ) ->Optional[int]:
"""simple docstring"""
snake_case_ = {
"""block_out_channels""": [32, 64, 64, 64],
"""in_channels""": 3,
"""layers_per_block""": 1,
"""out_channels""": 3,
"""time_embedding_type""": """fourier""",
"""norm_eps""": 1E-6,
"""mid_block_scale_factor""": math.sqrt(2.0 ),
"""norm_num_groups""": None,
"""down_block_types""": [
"""SkipDownBlock2D""",
"""AttnSkipDownBlock2D""",
"""SkipDownBlock2D""",
"""SkipDownBlock2D""",
],
"""up_block_types""": [
"""SkipUpBlock2D""",
"""SkipUpBlock2D""",
"""AttnSkipUpBlock2D""",
"""SkipUpBlock2D""",
],
}
snake_case_ = self.dummy_input
return init_dict, inputs_dict
@slow
def lowerCAmelCase ( self : List[Any] ) ->Tuple:
"""simple docstring"""
snake_case_ , snake_case_ = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(_A )
snake_case_ = self.dummy_input
snake_case_ = floats_tensor((4, 3) + (256, 256) ).to(_A )
snake_case_ = noise
snake_case_ = model(**_A )
assert image is not None, "Make sure output is not None"
@slow
def lowerCAmelCase ( self : int ) ->List[Any]:
"""simple docstring"""
snake_case_ = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" )
model.to(_A )
snake_case_ = 4
snake_case_ = 3
snake_case_ = (256, 256)
snake_case_ = torch.ones((batch_size, num_channels) + sizes ).to(_A )
snake_case_ = torch.tensor(batch_size * [1E-4] ).to(_A )
with torch.no_grad():
snake_case_ = model(_A , _A ).sample
snake_case_ = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
snake_case_ = torch.tensor([-4_842.8_691, -6_499.6_631, -3_800.1_953, -7_978.2_686, -10_980.7_129, -20_028.8_535, 8_148.2_822, 2_342.2_905, 567.7_608] )
# fmt: on
self.assertTrue(torch_all_close(_A , _A , rtol=1E-2 ) )
def lowerCAmelCase ( self : int ) ->int:
"""simple docstring"""
snake_case_ = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" )
model.to(_A )
snake_case_ = 4
snake_case_ = 3
snake_case_ = (32, 32)
snake_case_ = torch.ones((batch_size, num_channels) + sizes ).to(_A )
snake_case_ = torch.tensor(batch_size * [1E-4] ).to(_A )
with torch.no_grad():
snake_case_ = model(_A , _A ).sample
snake_case_ = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
snake_case_ = torch.tensor([-0.0_325, -0.0_900, -0.0_869, -0.0_332, -0.0_725, -0.0_270, -0.0_101, 0.0_227, 0.0_256] )
# fmt: on
self.assertTrue(torch_all_close(_A , _A , rtol=1E-2 ) )
def lowerCAmelCase ( self : str ) ->Dict:
"""simple docstring"""
pass
| 347
|
from __future__ import annotations

from fractions import Fraction
from math import gcd, sqrt


def is_sq(number: int) -> bool:
    """Return True if `number` is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Add three fractions and reduce the result to lowest terms."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f"{solution() = }")
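# Rough cost check: the quadruple loop tries at most order**4 = 35**4
# (about 1.5 million) combinations of (x_num, x_den, y_num, y_den), each with
# a handful of gcd and square checks, so the search finishes in roughly
# seconds to tens of seconds in plain CPython.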
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple

import numpy as np

from . import residue_constants


FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01


@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation."""

    # Cartesian coordinates of atoms in angstroms. The atom types correspond to
    # residue_constants.atom_types.
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None


def from_proteinnet_string(proteinnet_str: str) -> Protein:
    """Parse a ProteinNet-formatted string into a `Protein`."""
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = "X"  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom], :] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )


def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    """Build the REMARK/PARENT header lines for a PDB file."""
    pdb_headers: List[str] = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers


def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    """Add pdb headers to an existing PDB string. Useful during multi-chain recycling."""
    out_pdb_lines: List[str] = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]

            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)


def to_pdb(prot: Protein) -> str:
    """Convert a `Protein` instance to a PDB string."""
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines: List[str] = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)


def ideal_atom_mask(prot: Protein) -> np.ndarray:
    """Compute the atom mask implied by the amino-acid sequence alone."""
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]


def from_prediction(
    features: FeatureDict,
    result: ModelOutput,
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> Protein:
    """Assemble a `Protein` from model features and prediction results."""
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
@register_to_config
    def __init__( self , learnable , hidden_size = None , length = None ) -> Optional[Any]:
        super().__init__()
        self.learnable = learnable
        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length , hidden_size )
        else:
            embeddings = None
        self.embeddings = torch.nn.Parameter(embeddings )
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ =42
UpperCAmelCase_ =42
UpperCAmelCase_ =42
UpperCAmelCase_ =42
UpperCAmelCase_ =42
UpperCAmelCase_ =42
    def __init__( self , vqvae , text_encoder , tokenizer , transformer , scheduler , learned_classifier_free_sampling_embeddings , ) -> Any:
        super().__init__()
        self.register_modules(
            vqvae=vqvae , transformer=transformer , text_encoder=text_encoder , tokenizer=tokenizer , scheduler=scheduler , learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings , )
    def _encode_prompt( self , prompt , num_images_per_prompt , do_classifier_free_guidance ) -> Optional[int]:
        batch_size = len(prompt ) if isinstance(prompt , list ) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                '''The following part of your input was truncated because CLIP can only handle sequences up to'''
                F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=True )
        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0 ).repeat(batch_size , 1 , 1 )
            else:
                uncond_tokens = [''''''] * batch_size
                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens , padding='''max_length''' , max_length=max_length , truncation=True , return_tensors='''pt''' , )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=True )
                # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                seq_len = negative_prompt_embeds.shape[1]
                negative_prompt_embeds = negative_prompt_embeds.repeat(1 , num_images_per_prompt , 1 )
                negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt , seq_len , -1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds] )
        return prompt_embeds
    @torch.no_grad()
    def __call__( self , prompt , num_inference_steps = 100 , guidance_scale = 5.0 , truncation_rate = 1.0 , num_images_per_prompt = 1 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt , str ):
            batch_size = 1
        elif isinstance(prompt , list ):
            batch_size = len(prompt )
        else:
            raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(prompt )}''' )
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        prompt_embeds = self._encode_prompt(prompt , num_images_per_prompt , do_classifier_free_guidance )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps , int ) or callback_steps <= 0)
        ):
            raise ValueError(
                F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                F''' {type(callback_steps )}.''' )
        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape , mask_class ).to(self.device )
        else:
            if latents.shape != latents_shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    '''Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0,'''
                    F''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
            latents = latents.to(self.device )
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps_tensor = self.scheduler.timesteps.to(self.device )
        sample = latents
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input , encoder_hidden_states=prompt_embeds , timestep=t ).sample
            if do_classifier_free_guidance:
                model_output_uncond , model_output_text = model_output.chunk(2 )
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output , dim=1 , keepdim=True )
            model_output = self.truncate(model_output , truncation_rate )
            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70 )
            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output , timestep=t , sample=sample , generator=generator ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i , t , sample )
        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample , shape=embeddings_shape )
        image = self.vqvae.decode(embeddings , force_not_quantize=True ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
    def truncate( self , log_p_x_0 , truncation_rate ) -> torch.FloatTensor:
        sorted_log_p_x_0 , indices = torch.sort(log_p_x_0 , 1 , descending=True )
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0 )
        keep_mask = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :] , True )
        keep_mask = torch.cat((all_true, keep_mask) , dim=1 )
        keep_mask = keep_mask[:, :-1, :]
        keep_mask = keep_mask.gather(1 , indices.argsort(1 ) )
        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)
        return rv
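A hedged worked example of the truncation step above on a toy distribution (shapes are (batch, vocab, positions) as in the pipeline; the numbers are illustrative). With truncation_rate=0.5 only the most likely token (p=0.7) survives; everything else is sent to log(0):

import torch

log_p = torch.log(torch.tensor([[[0.7], [0.2], [0.1]]]) )   # already sorted in this toy case
sorted_log_p , idx = torch.sort(log_p , 1 , descending=True )
keep = torch.exp(sorted_log_p ).cumsum(dim=1 ) < 0.5         # False everywhere: even 0.7 alone exceeds the rate
keep = torch.cat((torch.full_like(keep[:, 0:1, :] , True ), keep) , dim=1 )[:, :-1, :]
keep = keep.gather(1 , idx.argsort(1 ) )                     # True, False, False after un-sorting
out = log_p.clone()
out[~keep] = -torch.inf
print(out.exp().squeeze() )                                  # tensor([0.7000, 0.0000, 0.0000])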
| 299
| 0
|
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders( accelerator : Accelerator , dataset : DatasetDict , train_idxs : List[int] , valid_idxs : List[int] , batch_size : int = 16):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        })
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["idx", "sentence1", "sentence2"] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label" , "labels")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="longest" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="pt" , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE)
    test_dataloader = DataLoader(
        tokenized_datasets["test"] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader, test_dataloader
def training_function( config , args ):
    '''simple docstring'''
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("glue" , "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue" , "mrpc")
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed)
    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows) , datasets["train"]["label"])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader , eval_dataloader , test_dataloader = get_fold_dataloaders(
            accelerator , datasets , train_idxs , valid_idxs , )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=True)
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters() , lr=lr)
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps , )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
            model , optimizer , train_dataloader , eval_dataloader , lr_scheduler)
        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions , references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions , references=references , )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(F'epoch {epoch}:' , eval_metric)
        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions , references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())
        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions , dim=0))
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references , dim=0)
    preds = torch.stack(test_predictions , dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds , references=test_references)
    accelerator.print("Average test metrics from all folds:" , test_metric)
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision" , type=str , default=None , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU." , )
    parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds" , type=int , default=3 , help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config , args)
if __name__ == "__main__":
main()
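A small, self-contained sketch of the fold-ensembling step at the end of training_function: stack the per-fold test logits, average them, and take the argmax as the final prediction (soft voting). The toy logits below are illustrative:

import torch

fold_logits = [
    torch.tensor([[2.0, 0.5], [0.2, 1.0]]),  # fold 0: (examples, classes)
    torch.tensor([[1.5, 1.0], [0.4, 0.9]]),  # fold 1
    torch.tensor([[0.5, 1.2], [0.1, 2.0]]),  # fold 2
]
ensembled = torch.stack(fold_logits , dim=0 ).sum(dim=0 ).div(len(fold_logits ) )
print(ensembled.argmax(dim=-1 ) )  # tensor([0, 1])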
| 232
|
def A__ ( input_num ):
    if not isinstance(input_num, int ):
        raise ValueError('''Input must be an integer''' )
    if input_num <= 0:
        raise ValueError('''Input must be positive''' )
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
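The function above scans every candidate up to input_num // 2, which is O(n). A hedged sketch of the standard O(sqrt(n)) alternative, pairing each divisor d <= sqrt(n) with its cofactor n // d (same result, just faster; not part of the original snippet):

def sum_of_proper_divisors_fast(n: int) -> int:
    if n <= 0:
        raise ValueError('''Input must be positive''' )
    if n == 1:
        return 0
    total , d = 1 , 2  # 1 always divides n > 1; the cofactor n itself is excluded
    while d * d <= n:
        if n % d == 0:
            total += d
            if d != n // d:
                total += n // d
        d += 1
    return total

assert sum_of_proper_divisors_fast(28 ) == 28  # 1 + 2 + 4 + 7 + 14: 28 is perfect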
| 299
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'],
'processing_layoutlmv2': ['LayoutLMv2Processor'],
'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutlmv2_fast'] = ['LayoutLMv2TokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_layoutlmv2'] = ['LayoutLMv2FeatureExtractor']
    _import_structure['image_processing_layoutlmv2'] = ['LayoutLMv2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_layoutlmv2'] = [
'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv2ForQuestionAnswering',
'LayoutLMv2ForSequenceClassification',
'LayoutLMv2ForTokenClassification',
'LayoutLMv2Layer',
'LayoutLMv2Model',
'LayoutLMv2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
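A hedged sketch of the core trick `_LazyModule` relies on: module-level __getattr__ (PEP 562) defers the real import until an attribute is first touched. A real _LazyModule also wires in TYPE_CHECKING and the full import structure; here `json` merely stands in for a heavy optional dependency:

import importlib

_lazy_import_structure = {"json": ["dumps", "loads"]}
_attr_to_module = {attr: mod for mod, attrs in _lazy_import_structure.items() for attr in attrs}

def __getattr__(name):
    # Consulted only for names not found in this module's globals.
    if name in _attr_to_module:
        module = importlib.import_module(_attr_to_module[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

# Saved as lazy_mod.py: `import lazy_mod; lazy_mod.dumps({})` imports json only at that first call.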
| 124
|
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__UpperCAmelCase = "pt"
elif is_tf_available():
__UpperCAmelCase = "tf"
else:
__UpperCAmelCase = "jax"
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = ByTaTokenizer
    test_rust_tokenizer = False
    def setUp( self ) -> Tuple:
        super().setUp()
        tokenizer = ByTaTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def ta_base_tokenizer( self ) -> List[str]:
        return ByTaTokenizer.from_pretrained('''google/byt5-small''' )
    def get_tokenizer( self , **kwargs ) -> ByTaTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_clean_sequence( self , tokenizer , with_prefix_space=False , max_length=20 , min_length=5 ) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer ) ):
            try:
                tok = tokenizer.decode([i] , clean_up_tokenization_spaces=False )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        toks = list(filter(lambda t : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , toks ) )
        toks = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=False ) , toks ) )
        if max_length is not None and len(toks ) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks ) < min_length and len(toks ) > 0:
            while len(toks ) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids , clean_up_tokenization_spaces=False )
        if " " not in output_txt and len(toks_ids ) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=False )
                + ''' '''
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=False )
            )
        if with_prefix_space:
            output_txt = ''' ''' + output_txt
        output_ids = tokenizer.encode(output_txt , add_special_tokens=False )
        return output_txt, output_ids
    def test_eos_treatment( self ) -> str:
        tokenizer = self.ta_base_tokenizer
        batch_with_eos_added = tokenizer(['''hi</s>''', '''I went to the gym</s>''', '''</s>'''] )
        batch_without_eos_added = tokenizer(['''hi''', '''I went to the gym''', ''''''] )
        self.assertListEqual(batch_with_eos_added['''input_ids'''] , batch_without_eos_added['''input_ids'''] )
    def test_multibytes_char( self ) -> Any:
        tokenizer = self.ta_base_tokenizer
        src_text = '''Unicode €.'''
        encoded = tokenizer(src_text )
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded['''input_ids'''] , encoded_ids )
        # decoding
        decoded = tokenizer.decode(encoded_ids )
        self.assertEqual(decoded , '''Unicode €.</s>''' )
        encoded = tokenizer('''e è é ê ë''' )
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded['''input_ids'''] , encoded_ids )
        # decoding
        decoded = tokenizer.decode(encoded_ids )
        self.assertEqual(decoded , '''e è é ê ë</s>''' )
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''e è é ê ë</s>''' )
def _UpperCamelCase ( self ) -> List[str]:
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
SCREAMING_SNAKE_CASE_ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
SCREAMING_SNAKE_CASE_ = tokenizer(_A , padding=_A , return_tensors=_A )
self.assertIsInstance(_A , _A )
if FRAMEWORK != "jax":
SCREAMING_SNAKE_CASE_ = list(batch.input_ids.numpy()[0] )
else:
SCREAMING_SNAKE_CASE_ = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_A , _A )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
SCREAMING_SNAKE_CASE_ = tokenizer(_A , padding=_A , return_tensors=_A )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , _A )
self.assertIn('''attention_mask''' , _A )
self.assertNotIn('''decoder_input_ids''' , _A )
self.assertNotIn('''decoder_attention_mask''' , _A )
def _UpperCamelCase ( self ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = [
'''Summary of the text.''',
'''Another summary.''',
]
SCREAMING_SNAKE_CASE_ = tokenizer(
text_target=_A , max_length=32 , padding='''max_length''' , truncation=_A , return_tensors=_A )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
def _UpperCamelCase ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = ['''A long paragraph for summarization. </s>''']
SCREAMING_SNAKE_CASE_ = ['''Summary of the text. </s>''']
# fmt: off
SCREAMING_SNAKE_CASE_ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
SCREAMING_SNAKE_CASE_ = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
SCREAMING_SNAKE_CASE_ = tokenizer(_A , text_target=_A )
self.assertEqual(_A , batch['''input_ids'''][0] )
self.assertEqual(_A , batch['''labels'''][0] )
def _UpperCamelCase ( self ) -> Dict:
# safety check on max_len default value so we are sure the test works
SCREAMING_SNAKE_CASE_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
SCREAMING_SNAKE_CASE_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = ''' He is very happy, UNwant\u00E9d,running'''
SCREAMING_SNAKE_CASE_ = tokenizer.encode(_A , add_special_tokens=_A )
tokenizer.save_pretrained(_A )
SCREAMING_SNAKE_CASE_ = tokenizer.__class__.from_pretrained(_A )
SCREAMING_SNAKE_CASE_ = after_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
shutil.rmtree(_A )
SCREAMING_SNAKE_CASE_ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''] )
SCREAMING_SNAKE_CASE_ = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''' )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
SCREAMING_SNAKE_CASE_ = tokenizer.encode(_A , add_special_tokens=_A )
tokenizer.save_pretrained(_A )
SCREAMING_SNAKE_CASE_ = tokenizer.__class__.from_pretrained(_A )
SCREAMING_SNAKE_CASE_ = after_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
SCREAMING_SNAKE_CASE_ = tokenizer.__class__.from_pretrained(_A , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_A )
def _UpperCamelCase ( self ) -> int:
SCREAMING_SNAKE_CASE_ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_A )
with open(os.path.join(_A , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
SCREAMING_SNAKE_CASE_ = json.load(_A )
with open(os.path.join(_A , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
SCREAMING_SNAKE_CASE_ = json.load(_A )
SCREAMING_SNAKE_CASE_ = [F'''<extra_id_{i}>''' for i in range(125 )]
SCREAMING_SNAKE_CASE_ = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
SCREAMING_SNAKE_CASE_ = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(_A , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(_A , _A )
with open(os.path.join(_A , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(_A , _A )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
SCREAMING_SNAKE_CASE_ = tokenizer_class.from_pretrained(
_A , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
SCREAMING_SNAKE_CASE_ = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=_A )]
SCREAMING_SNAKE_CASE_ = tokenizer_class.from_pretrained(
_A , additional_special_tokens=_A , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE_ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_A )
SCREAMING_SNAKE_CASE_ = tokenizer_class.from_pretrained(_A )
self.assertTrue(tokenizer.decode([255] ) == '''''' )
def _UpperCamelCase ( self ) -> int:
pass
def _UpperCamelCase ( self ) -> Any:
pass
def _UpperCamelCase ( self ) -> Any:
pass
def _UpperCamelCase ( self ) -> Optional[int]:
pass
def _UpperCamelCase ( self ) -> Union[str, Any]:
# The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
# and special added tokens as tokens
SCREAMING_SNAKE_CASE_ = self.get_tokenizers(fast=_A , do_lower_case=_A )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
SCREAMING_SNAKE_CASE_ = ['''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''x''', '''t''', '''</s>''']
SCREAMING_SNAKE_CASE_ = tokenizer.convert_tokens_to_string(_A )
self.assertIsInstance(_A , _A )
def _UpperCamelCase ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
SCREAMING_SNAKE_CASE_ = [
'''bos_token''',
'''eos_token''',
'''unk_token''',
'''sep_token''',
'''pad_token''',
'''cls_token''',
'''mask_token''',
]
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = tokenizer.convert_ids_to_tokens(
_A , skip_special_tokens=_A )
for attr in attributes_list:
setattr(_A , attr + '''_id''' , _A )
self.assertEqual(getattr(_A , _A ) , _A )
self.assertEqual(getattr(_A , attr + '''_id''' ) , _A )
setattr(_A , attr + '''_id''' , _A )
self.assertEqual(getattr(_A , _A ) , _A )
self.assertEqual(getattr(_A , attr + '''_id''' ) , _A )
setattr(_A , '''additional_special_tokens_ids''' , [] )
self.assertListEqual(getattr(_A , '''additional_special_tokens''' ) , [] )
self.assertListEqual(getattr(_A , '''additional_special_tokens_ids''' ) , [] )
setattr(_A , '''additional_special_tokens_ids''' , [token_id_to_test_setters] )
self.assertListEqual(getattr(_A , '''additional_special_tokens''' ) , [token_to_test_setters] )
self.assertListEqual(getattr(_A , '''additional_special_tokens_ids''' ) , [token_id_to_test_setters] )
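A hedged note on the expected id lists in the tests above: ByT5 has no learned vocabulary at the bottom of its id space; it maps each UTF-8 byte b to id b + 3 (ids 0/1/2 are reserved for pad/eos/unk) and appends eos. A self-contained sketch reproducing the "Unicode €." expectation (the extra_id sentinels at the top of the vocab are not modeled here):

def byte_ids(text: str, offset: int = 3, eos_id: int = 1) -> list:
    return [b + offset for b in text.encode("utf-8")] + [eos_id]

assert byte_ids("Unicode €.") == [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]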
| 299
| 0
|
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowerCAmelCase__ = get_tests_dir('''fixtures''')
lowerCAmelCase__ = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
lowerCAmelCase__ = get_tests_dir('''fixtures/dummy-config.json''')
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = 0
def SCREAMING_SNAKE_CASE ( self : List[str] ):
__lowercase = AutoFeatureExtractor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(_A ,_A )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase = AutoFeatureExtractor.from_pretrained(_A )
self.assertIsInstance(_A ,_A )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
__lowercase = AutoFeatureExtractor.from_pretrained(_A ).to_dict()
config_dict.pop('''feature_extractor_type''' )
__lowercase = WavaVecaFeatureExtractor(**_A )
# save in new folder
model_config.save_pretrained(_A )
config.save_pretrained(_A )
__lowercase = AutoFeatureExtractor.from_pretrained(_A )
# make sure private variable is not incorrectly saved
__lowercase = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(_A ,_A )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = AutoFeatureExtractor.from_pretrained(_A )
self.assertIsInstance(_A ,_A )
def SCREAMING_SNAKE_CASE ( self : int ):
with self.assertRaisesRegex(
_A ,'''bert-base is not a local folder and is not a valid model identifier''' ):
__lowercase = AutoFeatureExtractor.from_pretrained('''bert-base''' )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
with self.assertRaisesRegex(
_A ,r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
__lowercase = AutoFeatureExtractor.from_pretrained(_A ,revision='''aaaaaa''' )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
with self.assertRaisesRegex(
_A ,'''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' ,):
__lowercase = AutoFeatureExtractor.from_pretrained('''hf-internal-testing/config-no-model''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_A ):
__lowercase = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_A ):
__lowercase = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' ,trust_remote_code=_A )
__lowercase = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' ,trust_remote_code=_A )
self.assertEqual(feature_extractor.__class__.__name__ ,'''NewFeatureExtractor''' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_A )
__lowercase = AutoFeatureExtractor.from_pretrained(_A ,trust_remote_code=_A )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ ,'''NewFeatureExtractor''' )
def SCREAMING_SNAKE_CASE ( self : str ):
try:
            AutoConfig.register('''custom''' ,CustomConfig )
            AutoFeatureExtractor.register(CustomConfig ,CustomFeatureExtractor )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError ):
                AutoFeatureExtractor.register(WavaVecaConfig ,WavaVecaFeatureExtractor )
# Now that the config is registered, it can be used as any other config with the auto-API
__lowercase = CustomFeatureExtractor.from_pretrained(_A )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_A )
__lowercase = AutoFeatureExtractor.from_pretrained(_A )
self.assertIsInstance(_A ,_A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE ( self : Dict ):
        class NewFeatureExtractor (CustomFeatureExtractor ):
            """simple docstring"""
            is_local = True
        try:
            AutoConfig.register('''custom''' ,CustomConfig )
            AutoFeatureExtractor.register(CustomConfig ,NewFeatureExtractor )
# If remote code is not set, the default is to use local
__lowercase = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' )
self.assertEqual(feature_extractor.__class__.__name__ ,'''NewFeatureExtractor''' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
__lowercase = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' ,trust_remote_code=_A )
self.assertEqual(feature_extractor.__class__.__name__ ,'''NewFeatureExtractor''' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
__lowercase = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' ,trust_remote_code=_A )
self.assertEqual(feature_extractor.__class__.__name__ ,'''NewFeatureExtractor''' )
self.assertTrue(not hasattr(_A ,'''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
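A minimal, self-contained sketch of the register/lookup pattern those registration tests exercise (an assumed simplification of the real AutoFeatureExtractor machinery, which also consults config files and remote code): a mapping from config classes to extractor classes that refuses to overwrite an existing entry, mirroring the ValueError the test expects.

_FEATURE_EXTRACTOR_REGISTRY = {}

def register(config_cls, extractor_cls):
    if config_cls in _FEATURE_EXTRACTOR_REGISTRY:
        raise ValueError(f"{config_cls.__name__} is already registered")
    _FEATURE_EXTRACTOR_REGISTRY[config_cls] = extractor_cls

def extractor_class_for(config):
    return _FEATURE_EXTRACTOR_REGISTRY[type(config)]

class DemoConfig: ...
class DemoFeatureExtractor: ...

register(DemoConfig, DemoFeatureExtractor)
assert extractor_class_for(DemoConfig()) is DemoFeatureExtractor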
| 104
|
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative( img ):
    # get the image dimensions (number of rows and columns)
    rows , cols = img.shape[0], img.shape[1]
    # convert each pixel's color to its negative
    for i in range(rows ):
        for j in range(cols ):
            img[i][j] = [2_55, 2_55, 2_55] - img[i][j]
    return img
if __name__ == "__main__":
# read original image
__UpperCAmelCase = imread("image_data/lena.jpg", 1)
# convert to its negative
__UpperCAmelCase = convert_to_negative(img)
# show result image
imshow("negative of original image", img)
waitKey(0)
destroyAllWindows()
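A hedged follow-up: since imread returns a uint8 numpy array, the same negative can be computed as a single vectorized expression with no Python loops, e.g.:

import numpy as np

img = np.zeros((2, 2, 3), dtype=np.uint8)  # stand-in for an imread(...) result
negative = 255 - img                        # elementwise over the whole image at once
assert (negative == 255).all()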
| 299
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _UpperCamelCase :
'''simple docstring'''
    def __init__( self , parent , batch_size=1_3 , image_size=3_0 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=1_0 , initializer_range=0.02 , scope=None , encoder_stride=2 , ):
'''simple docstring'''
__lowercase =parent
__lowercase =batch_size
__lowercase =image_size
__lowercase =patch_size
__lowercase =num_channels
__lowercase =is_training
__lowercase =use_labels
__lowercase =hidden_size
__lowercase =num_hidden_layers
__lowercase =num_attention_heads
__lowercase =intermediate_size
__lowercase =hidden_act
__lowercase =hidden_dropout_prob
__lowercase =attention_probs_dropout_prob
__lowercase =type_sequence_label_size
__lowercase =initializer_range
__lowercase =scope
__lowercase =encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowercase =(image_size // patch_size) ** 2
__lowercase =num_patches + 1
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
__lowercase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__lowercase =None
if self.use_labels:
__lowercase =ids_tensor([self.batch_size] , self.type_sequence_label_size)
__lowercase =self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self : Any):
'''simple docstring'''
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __lowerCamelCase ( self : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict):
'''simple docstring'''
__lowercase =ViTModel(config=_A)
model.to(_A)
model.eval()
__lowercase =model(_A)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __lowerCamelCase ( self : Dict , _lowerCAmelCase : str , _lowerCAmelCase : Any , _lowerCAmelCase : str):
'''simple docstring'''
__lowercase =ViTForMaskedImageModeling(config=_A)
model.to(_A)
model.eval()
__lowercase =model(_A)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
__lowercase =1
__lowercase =ViTForMaskedImageModeling(_A)
model.to(_A)
model.eval()
__lowercase =floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
__lowercase =model(_A)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def __lowerCamelCase ( self : int , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str):
'''simple docstring'''
__lowercase =self.type_sequence_label_size
__lowercase =ViTForImageClassification(_A)
model.to(_A)
model.eval()
__lowercase =model(_A , labels=_A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
__lowercase =1
__lowercase =ViTForImageClassification(_A)
model.to(_A)
model.eval()
__lowercase =floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
__lowercase =model(_A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class _UpperCamelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{"""feature-extraction""": ViTModel, """image-classification""": ViTForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__lowercase =ViTModelTester(self)
__lowercase =ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=3_7)
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds')
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
pass
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__lowercase , __lowercase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase =model_class(_A)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
__lowercase =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A , nn.Linear))
def __lowerCamelCase ( self : Any):
'''simple docstring'''
__lowercase , __lowercase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase =model_class(_A)
__lowercase =inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase =[*signature.parameters.keys()]
__lowercase =['pixel_values']
self.assertListEqual(arg_names[:1] , _A)
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A)
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_A)
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A)
@slow
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase =ViTModel.from_pretrained(_A)
self.assertIsNotNone(_A)
def _A ( ):
"""simple docstring"""
__lowercase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __lowerCamelCase ( self : int):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224') if is_vision_available() else None
@slow
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
__lowercase =ViTForImageClassification.from_pretrained('google/vit-base-patch16-224').to(_A)
__lowercase =self.default_image_processor
__lowercase =prepare_img()
__lowercase =image_processor(images=_A , return_tensors='pt').to(_A)
# forward pass
with torch.no_grad():
__lowercase =model(**_A)
# verify the logits
__lowercase =torch.Size((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , _A)
__lowercase =torch.tensor([-0.2744, 0.8215, -0.0836]).to(_A)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1e-4))
@slow
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__lowercase =ViTModel.from_pretrained('facebook/dino-vits8').to(_A)
__lowercase =ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=4_8_0)
__lowercase =prepare_img()
__lowercase =image_processor(images=_A , return_tensors='pt')
__lowercase =inputs.pixel_values.to(_A)
# forward pass
with torch.no_grad():
__lowercase =model(_A , interpolate_pos_encoding=_A)
# verify the logits
__lowercase =torch.Size((1, 3_6_0_1, 3_8_4))
self.assertEqual(outputs.last_hidden_state.shape , _A)
__lowercase =torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]).to(_A)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , _A , atol=1e-4))
@slow
@require_accelerate
@require_torch_gpu
def __lowerCamelCase ( self : Any):
'''simple docstring'''
__lowercase =ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.floataa , device_map='auto')
__lowercase =self.default_image_processor
__lowercase =prepare_img()
__lowercase =image_processor(images=_A , return_tensors='pt')
__lowercase =inputs.pixel_values.to(_A)
# forward pass to make sure inference works in fp16
with torch.no_grad():
__lowercase =model(_A)
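A hedged sketch of the shape arithmetic these tests rely on: a ViT splits the image into (image_size // patch_size) ** 2 patches and prepends one [CLS] token, which is exactly where the (1, 3601, 384) check above comes from (dino-vits8 at 480x480 with 8x8 patches and hidden size 384):

def vit_seq_length(image_size: int, patch_size: int) -> int:
    return (image_size // patch_size) ** 2 + 1

assert vit_seq_length(480, 8) == 3601    # the interpolated dino-vits8 case above
assert vit_seq_length(224, 16) == 197    # standard vit-base-patch16-224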
| 166
|
import math
def check_partition_perfect( positive_integer ):
    exponent = math.log2(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
    return exponent == int(exponent )
def solution( max_proportion = 1 / 1_23_45 ):
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate ):
            partition_candidate = int(partition_candidate )
            total_partitions += 1
            if check_partition_perfect(partition_candidate ):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate )
        integer += 1
integer += 1
if __name__ == "__main__":
print(F"""{solution() = }""")
| 299
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = """▁"""
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """facebook/mbart-large-en-ro""": (
            """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"""
        ),
        """facebook/mbart-large-cc25""": (
            """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"""
        ),
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """facebook/mbart-large-en-ro""": 10_24,
    """facebook/mbart-large-cc25""": 10_24,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""]
class a_ (__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = VOCAB_FILES_NAMES
__lowerCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : List[str] = ["""input_ids""", """attention_mask"""]
__lowerCAmelCase : List[str] = []
__lowerCAmelCase : List[Any] = []
def __init__( self , snake_case_ , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_="<mask>" , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_ = None , snake_case_=None , **snake_case_ , ):
# Mask token behave like a normal word, i.e. include the space before it
_lowerCAmelCase : Optional[Any] = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token
_lowerCAmelCase : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , mask_token=_A , tokenizer_file=_A , src_lang=_A , tgt_lang=_A , additional_special_tokens=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , )
_lowerCAmelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_A ) )
_lowerCAmelCase : List[str] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
_lowerCAmelCase : Optional[int] = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_lowerCAmelCase : List[str] = 1
_lowerCAmelCase : Union[str, Any] = len(self.sp_model )
_lowerCAmelCase : str = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_A )
}
_lowerCAmelCase : Union[str, Any] = {v: k for k, v in self.lang_code_to_id.items()}
_lowerCAmelCase : str = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
_lowerCAmelCase : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
_lowerCAmelCase : Union[str, Any] = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
_lowerCAmelCase : Dict = src_lang if src_lang is not None else """en_XX"""
_lowerCAmelCase : int = self.lang_code_to_id[self._src_lang]
_lowerCAmelCase : Union[str, Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for generate()."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self, src_texts, src_lang="en_XX", tgt_texts=None, tgt_lang="ro_RO", **kwargs,
    ):
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting: no prefix, suffix = [eos, src_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang) -> None:
        """Reset the special tokens to the target language setting: no prefix, suffix = [eos, tgt_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
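# --- Usage sketch (added; not part of the original file) ---
# A minimal example of how a multilingual seq2seq tokenizer like the one above
# is typically driven. The concrete class and checkpoint are assumptions for
# illustration (the suffix_tokens = [eos, lang_code] convention matches
# transformers' MBartTokenizer); substitute the class this file actually defines.
#
#   tokenizer = MBartTokenizer.from_pretrained(
#       "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   batch = tokenizer("UN Chief Says There Is No Plan", return_tensors="pt")
#   # The encoded ids end with [eos, src_lang_code], as set by
#   # set_src_lang_special_tokens() above.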
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8), "Stack".center(print_width), "Postfix".center(print_width), sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8), ("".join(stack)).ljust(print_width), ("".join(post_fix)).ljust(print_width), sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8), ("".join(stack)).ljust(print_width), ("".join(post_fix)).ljust(print_width), sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
'''simple docstring'''
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs
@abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
"""simple docstring"""
pass
class AbstractDatasetInputStream(ABC):
'''simple docstring'''
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs
@abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
"""simple docstring"""
pass
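# --- Subclassing sketch (added; not part of the original file) ---
# Concrete readers in the `datasets` package (e.g. the CSV/JSON readers) build
# on these bases by implementing read(). A hypothetical minimal subclass:
#
#   class MyReader(AbstractDatasetReader):
#       def read(self) -> Dataset:
#           # Build a Dataset from self.path_or_paths, honoring self.features,
#           # self.cache_dir, self.keep_in_memory and self.streaming.
#           ...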
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__UpperCAmelCase = logging.get_logger(__name__)
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ =["input_features", "is_longer"]
def __init__( self , _A=64 , _A=48000 , _A=480 , _A=10 , _A=1024 , _A=0.0 , _A=False , _A = 0 , _A = 14000 , _A = None , _A = "fusion" , _A = "repeatpad" , **_A , ) -> Dict:
super().__init__(
feature_size=_A , sampling_rate=_A , padding_value=_A , return_attention_mask=_A , **_A , )
SCREAMING_SNAKE_CASE_ = top_db
SCREAMING_SNAKE_CASE_ = truncation
SCREAMING_SNAKE_CASE_ = padding
SCREAMING_SNAKE_CASE_ = fft_window_size
SCREAMING_SNAKE_CASE_ = (fft_window_size >> 1) + 1
SCREAMING_SNAKE_CASE_ = hop_length
SCREAMING_SNAKE_CASE_ = max_length_s
SCREAMING_SNAKE_CASE_ = max_length_s * sampling_rate
SCREAMING_SNAKE_CASE_ = sampling_rate
SCREAMING_SNAKE_CASE_ = frequency_min
SCREAMING_SNAKE_CASE_ = frequency_max
SCREAMING_SNAKE_CASE_ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_A , min_frequency=_A , max_frequency=_A , sampling_rate=_A , norm=_A , mel_scale='''htk''' , )
SCREAMING_SNAKE_CASE_ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_A , min_frequency=_A , max_frequency=_A , sampling_rate=_A , norm='''slaney''' , mel_scale='''slaney''' , )
def _UpperCamelCase ( self ) -> Dict[str, Any]:
SCREAMING_SNAKE_CASE_ = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE_ = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def _UpperCamelCase ( self , _A , _A = None ) -> np.ndarray:
SCREAMING_SNAKE_CASE_ = spectrogram(
_A , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=_A , log_mel='''dB''' , )
return log_mel_spectrogram.T
def _UpperCamelCase ( self , _A , _A , _A ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE_ = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE_ = [0]
# randomly choose index for each part
SCREAMING_SNAKE_CASE_ = np.random.choice(ranges[0] )
SCREAMING_SNAKE_CASE_ = np.random.choice(ranges[1] )
SCREAMING_SNAKE_CASE_ = np.random.choice(ranges[2] )
SCREAMING_SNAKE_CASE_ = mel[idx_front : idx_front + chunk_frames, :]
SCREAMING_SNAKE_CASE_ = mel[idx_middle : idx_middle + chunk_frames, :]
SCREAMING_SNAKE_CASE_ = mel[idx_back : idx_back + chunk_frames, :]
SCREAMING_SNAKE_CASE_ = torch.tensor(mel[None, None, :] )
SCREAMING_SNAKE_CASE_ = torch.nn.functional.interpolate(
_A , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=_A )
SCREAMING_SNAKE_CASE_ = mel_shrink[0][0].numpy()
SCREAMING_SNAKE_CASE_ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def _UpperCamelCase ( self , _A , _A , _A , _A ) -> np.array:
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
SCREAMING_SNAKE_CASE_ = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
SCREAMING_SNAKE_CASE_ = len(_A ) - max_length
SCREAMING_SNAKE_CASE_ = np.random.randint(0 , overflow + 1 )
SCREAMING_SNAKE_CASE_ = waveform[idx : idx + max_length]
SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters )
SCREAMING_SNAKE_CASE_ = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
SCREAMING_SNAKE_CASE_ = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
SCREAMING_SNAKE_CASE_ = np.stack([mel, mel, mel, mel] , axis=0 )
SCREAMING_SNAKE_CASE_ = False
else:
SCREAMING_SNAKE_CASE_ = self._random_mel_fusion(_A , _A , _A )
SCREAMING_SNAKE_CASE_ = True
else:
raise NotImplementedError(F'''data_truncating {truncation} not implemented''' )
else:
SCREAMING_SNAKE_CASE_ = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
SCREAMING_SNAKE_CASE_ = int(max_length / len(_A ) )
SCREAMING_SNAKE_CASE_ = np.stack(np.tile(_A , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
SCREAMING_SNAKE_CASE_ = int(max_length / len(_A ) )
SCREAMING_SNAKE_CASE_ = np.stack(np.tile(_A , _A ) )
SCREAMING_SNAKE_CASE_ = np.pad(_A , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
if truncation == "fusion":
SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters )
SCREAMING_SNAKE_CASE_ = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self , _A , _A = None , _A = None , _A = None , _A = None , _A = None , **_A , ) -> BatchFeature:
SCREAMING_SNAKE_CASE_ = truncation if truncation is not None else self.truncation
SCREAMING_SNAKE_CASE_ = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
SCREAMING_SNAKE_CASE_ = isinstance(_A , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
SCREAMING_SNAKE_CASE_ = is_batched_numpy or (
isinstance(_A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE_ = [np.asarray(_A , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_A , np.ndarray ):
SCREAMING_SNAKE_CASE_ = np.asarray(_A , dtype=np.floataa )
elif isinstance(_A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE_ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE_ = [np.asarray(_A )]
# convert to mel spectrogram, truncate and pad if needed.
SCREAMING_SNAKE_CASE_ = [
self._get_input_mel(_A , max_length if max_length else self.nb_max_samples , _A , _A )
for waveform in raw_speech
]
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = []
for mel, longer in padded_inputs:
input_mel.append(_A )
is_longer.append(_A )
if truncation == "fusion" and sum(_A ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
SCREAMING_SNAKE_CASE_ = np.random.randint(0 , len(_A ) )
SCREAMING_SNAKE_CASE_ = True
if isinstance(input_mel[0] , _A ):
SCREAMING_SNAKE_CASE_ = [np.asarray(_A , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
SCREAMING_SNAKE_CASE_ = [[longer] for longer in is_longer]
SCREAMING_SNAKE_CASE_ = {'''input_features''': input_mel, '''is_longer''': is_longer}
SCREAMING_SNAKE_CASE_ = BatchFeature(_A )
if return_tensors is not None:
SCREAMING_SNAKE_CASE_ = input_features.convert_to_tensors(_A )
return input_features
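# --- Usage sketch (added; not part of the original file) ---
# How a CLAP-style feature extractor is typically called; the call mirrors
# __call__ above, and the default constructor arguments are assumptions here.
#
#   import numpy as np
#   extractor = ClapFeatureExtractor()  # upstream: transformers' ClapFeatureExtractor
#   audio = np.random.randn(48_000).astype(np.float32)  # 1 s of mono audio at 48 kHz
#   features = extractor(audio, sampling_rate=48_000, return_tensors="np")
#   # features["input_features"] holds the (possibly fused) log-mel spectrograms;
#   # features["is_longer"] flags clips longer than max_length_s.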
"""simple docstring"""
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
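# Worked example (added for clarity): the depth-2 maxima of the leaf pairs are
# 90, 33, 65 and 34423; the depth-1 minima are min(90, 33) = 33 and
# min(65, 34423) = 65; the root then takes max(33, 65) = 65, so main() prints
# "Optimal value : 65".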
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of `value`, or its derivative when `deriv` is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Return the value found after `number_propagations` training steps."""
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
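# Convergence check (added; mirrors the upstream doctest of this algorithm,
# which is an assumption about its origin rather than part of this file):
#
#   result = forward_propagation(32, 450_000)
#   assert 31 < result < 33  # the output converges toward `expected`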
'''simple docstring'''
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class _lowercase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False
def a ( self : str ) -> Dict:
super().setUp()
__lowerCAmelCase = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str ) -> Optional[Any]:
__lowerCAmelCase = """UNwant\u00E9d,running"""
__lowerCAmelCase = """unwanted, running"""
return input_text, output_text
def a ( self : Tuple ) -> Optional[Any]:
__lowerCAmelCase = self.tokenizer_class(self.vocab_file )
__lowerCAmelCase = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(_A , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [9, 6, 7, 12, 10, 11] )
def a ( self : Union[str, Any] ) -> Dict:
__lowerCAmelCase = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def a ( self : str ) -> Union[str, Any]:
        __lowerCAmelCase = BasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def a ( self : List[str] ) -> List[str]:
        __lowerCAmelCase = BasicTokenizer(do_lower_case=True , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def a ( self : Optional[int] ) -> Optional[int]:
        __lowerCAmelCase = BasicTokenizer(do_lower_case=True , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def a ( self : Union[str, Any] ) -> List[Any]:
        __lowerCAmelCase = BasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def a ( self : Tuple ) -> Optional[Any]:
        __lowerCAmelCase = BasicTokenizer(do_lower_case=False )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def a ( self : str ) -> List[str]:
        __lowerCAmelCase = BasicTokenizer(do_lower_case=False , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def a ( self : Optional[Any] ) -> List[Any]:
        __lowerCAmelCase = BasicTokenizer(do_lower_case=False , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def a ( self : List[Any] ) -> Tuple:
__lowerCAmelCase = BasicTokenizer(do_lower_case=_A , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def a ( self : str ) -> List[Any]:
__lowerCAmelCase = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
__lowerCAmelCase = {}
for i, token in enumerate(_A ):
__lowerCAmelCase = i
__lowerCAmelCase = WordpieceTokenizer(vocab=_A , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
@require_torch
def a ( self : Tuple ) -> List[str]:
__lowerCAmelCase = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" )
__lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
__lowerCAmelCase = [10_37, 21_46, 2_04_23, 20_05, 76_80, 78_49, 39_89, 10_12, 1_02]
__lowerCAmelCase = tokenizer(_A , padding=_A , return_tensors="""pt""" )
self.assertIsInstance(_A , _A )
__lowerCAmelCase = list(batch.input_ids.numpy()[0] )
self.assertListEqual(_A , _A )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def a ( self : List[Any] ) -> List[Any]:
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def a ( self : Tuple ) -> str:
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def a ( self : Optional[int] ) -> Optional[int]:
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
@slow
def a ( self : Dict ) -> Any:
__lowerCAmelCase = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" )
__lowerCAmelCase = tokenizer.encode("""sequence builders""" , add_special_tokens=_A )
__lowerCAmelCase = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_A )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(_A )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(_A , _A )
assert encoded_sentence == text + [1_02]
assert encoded_pair == text + [1_02] + text_a + [1_02]
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
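# Migration example (added for clarity), following the deprecation message above:
#
#   # deprecated:
#   #   from diffusers.pipeline_utils import DiffusionPipeline
#   # preferred:
#   #   from diffusers.pipelines.pipeline_utils import DiffusionPipeline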
from __future__ import annotations
def fractional_knapsack(
    value: list[int], weight: list[int], capacity: int
) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: take items in decreasing value/weight ratio."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
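# Worked example (added for clarity): with values [60, 100, 120], weights
# [10, 20, 30] and capacity 50, the value/weight ratios are 6, 5 and 4, so the
# greedy order takes items 0 and 1 whole and 20/30 of item 2:
#
#   assert fractional_knapsack([60, 100, 120], [10, 20, 30], 50) == (240.0, [1, 1, 2 / 3])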
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
@register_to_config
def __init__( self , _A , _A , _A , _A , _A , _A , _A , _A , _A , _A = False , ) -> List[str]:
super().__init__()
SCREAMING_SNAKE_CASE_ = nn.Embedding(_A , _A )
SCREAMING_SNAKE_CASE_ = nn.Embedding(_A , _A )
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = nn.Dropout(p=_A )
SCREAMING_SNAKE_CASE_ = TaConfig(
vocab_size=_A , d_model=_A , num_heads=_A , d_kv=_A , d_ff=_A , dropout_rate=_A , feed_forward_proj=_A , is_decoder=_A , is_encoder_decoder=_A , )
SCREAMING_SNAKE_CASE_ = nn.ModuleList()
for lyr_num in range(_A ):
SCREAMING_SNAKE_CASE_ = TaBlock(_A )
self.encoders.append(_A )
SCREAMING_SNAKE_CASE_ = TaLayerNorm(_A )
SCREAMING_SNAKE_CASE_ = nn.Dropout(p=_A )
def _UpperCamelCase ( self , _A , _A ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = self.token_embedder(_A )
SCREAMING_SNAKE_CASE_ = encoder_input_tokens.shape[1]
SCREAMING_SNAKE_CASE_ = torch.arange(_A , device=encoder_input_tokens.device )
x += self.position_encoding(_A )
SCREAMING_SNAKE_CASE_ = self.dropout_pre(_A )
# inverted the attention mask
SCREAMING_SNAKE_CASE_ = encoder_input_tokens.size()
SCREAMING_SNAKE_CASE_ = self.get_extended_attention_mask(_A , _A )
for lyr in self.encoders:
SCREAMING_SNAKE_CASE_ = lyr(_A , _A )[0]
SCREAMING_SNAKE_CASE_ = self.layer_norm(_A )
return self.dropout_post(_A ), encoder_inputs_mask
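# --- Dataflow sketch (added; not part of the original file) ---
# The parameter names above are placeholders in this dump, so this is a shape
# sketch rather than a runnable call (upstream this appears to be the T5-style
# notes encoder used by diffusers' Spectrogram Diffusion pipeline). Given token
# ids of shape (batch, seq_len) and a same-shaped mask, forward() embeds the
# tokens, adds learned position encodings, runs the T5 blocks, and returns the
# hidden states of shape (batch, seq_len, d_model) together with the input mask.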
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __A :
'''simple docstring'''
__lowercase: str = field(
default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""})
__lowercase: Dict = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""})
__lowercase: Dict = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """The column name of the images in the files. If not set, will try to use 'image' or 'img'."""} , )
__lowercase: Optional[int] = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """A folder containing the training data."""})
__lowercase: int = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """A folder containing the validation data."""})
__lowercase: Optional[int] = field(
default=0.1_5 , metadata={"""help""": """Percent to split off of train for validation."""})
__lowercase: Dict = field(default=32 , metadata={"""help""": """The size of the square patches to use for masking."""})
__lowercase: List[Any] = field(
default=0.6 , metadata={"""help""": """Percentage of patches to mask."""} , )
__lowercase: Tuple = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
__lowercase: str = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def lowerCAmelCase ( self : Optional[Any] ) ->Tuple:
"""simple docstring"""
snake_case_ = {}
if self.train_dir is not None:
snake_case_ = self.train_dir
if self.validation_dir is not None:
snake_case_ = self.validation_dir
snake_case_ = data_files if data_files else None
@dataclass
class __A :
'''simple docstring'''
__lowercase: Optional[Any] = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"""help""": (
"""The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a """
"""checkpoint identifier on the hub. """
"""Don't set if you want to train a model from scratch."""
)
} , )
__lowercase: str = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(__SCREAMING_SNAKE_CASE)} , )
__lowercase: List[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""})
__lowercase: str = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
__lowercase: Tuple = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"""} , )
__lowercase: str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
__lowercase: Any = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Name or path of preprocessor config."""})
__lowercase: Union[str, Any] = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
__lowercase: int = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"""help""": (
"""The size (resolution) of each image. If not specified, will use `image_size` of the configuration."""
)
} , )
__lowercase: Optional[Any] = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"""help""": (
"""The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."""
)
} , )
__lowercase: List[Any] = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Stride to use for the encoder."""} , )
class MaskGenerator:
    """
    Generates the boolean masks used for SimMIM pretraining: a flat 0/1 tensor
    with one entry per model patch, where 1 means "masked".
    """

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")

        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())
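# Worked numbers (added for clarity): with the defaults above (input_size=192,
# mask_patch_size=32, model_patch_size=4, mask_ratio=0.6) we get
# rand_size = 192 // 32 = 6, scale = 32 // 4 = 8, token_count = 36 and
# mask_count = ceil(36 * 0.6) = 22, so each call returns a flat 0/1 tensor of
# length (6 * 8) ** 2 = 2304, i.e. one entry per model patch of the 192px image.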
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def _a ( ) -> Tuple:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
snake_case_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
snake_case_ , snake_case_ , snake_case_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
snake_case_ , snake_case_ , snake_case_ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_mim""" , __lowerCamelCase , __lowerCamelCase )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
snake_case_ = training_args.get_process_log_level()
logger.setLevel(__lowerCamelCase )
transformers.utils.logging.set_verbosity(__lowerCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
snake_case_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
snake_case_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
snake_case_ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
snake_case_ = None if """validation""" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , __lowerCamelCase ) and data_args.train_val_split > 0.0:
snake_case_ = ds["""train"""].train_test_split(data_args.train_val_split )
snake_case_ = split["""train"""]
snake_case_ = split["""test"""]
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case_ = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
snake_case_ = AutoConfig.from_pretrained(model_args.config_name_or_path , **__lowerCamelCase )
elif model_args.model_name_or_path:
snake_case_ = AutoConfig.from_pretrained(model_args.model_name_or_path , **__lowerCamelCase )
else:
snake_case_ = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(f"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(f"""New config: {config}""" )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(__lowerCamelCase , """decoder_type""" ):
snake_case_ = """simmim"""
# adapt config
snake_case_ = model_args.image_size if model_args.image_size is not None else config.image_size
snake_case_ = model_args.patch_size if model_args.patch_size is not None else config.patch_size
snake_case_ = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
"""image_size""": model_args.image_size,
"""patch_size""": model_args.patch_size,
"""encoder_stride""": model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
snake_case_ = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **__lowerCamelCase )
elif model_args.model_name_or_path:
snake_case_ = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **__lowerCamelCase )
else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
snake_case_ = AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
snake_case_ = AutoModelForMaskedImageModeling.from_config(__lowerCamelCase )
if training_args.do_train:
snake_case_ = ds["""train"""].column_names
else:
snake_case_ = ds["""validation"""].column_names
if data_args.image_column_name is not None:
snake_case_ = data_args.image_column_name
elif "image" in column_names:
snake_case_ = """image"""
elif "img" in column_names:
snake_case_ = """img"""
else:
snake_case_ = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
snake_case_ = Compose(
[
            Lambda(lambda img: img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
# create mask generator
snake_case_ = MaskGenerator(
input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
    def preprocess_images(examples):
        """Apply the transforms to each image and attach a freshly generated patch mask."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
snake_case_ = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(__lowerCamelCase )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
snake_case_ = (
ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(__lowerCamelCase )
# Initialize our trainer
snake_case_ = Trainer(
model=__lowerCamelCase , args=__lowerCamelCase , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=__lowerCamelCase , data_collator=__lowerCamelCase , )
# Training
if training_args.do_train:
snake_case_ = None
if training_args.resume_from_checkpoint is not None:
snake_case_ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
snake_case_ = last_checkpoint
snake_case_ = trainer.train(resume_from_checkpoint=__lowerCamelCase )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
snake_case_ = trainer.evaluate()
trainer.log_metrics("""eval""" , __lowerCamelCase )
trainer.save_metrics("""eval""" , __lowerCamelCase )
# Write model card and (optionally) push to hub
snake_case_ = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """masked-image-modeling""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""masked-image-modeling"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__lowerCamelCase )
else:
trainer.create_model_card(**__lowerCamelCase )
if __name__ == "__main__":
main()
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ ="Wav2Vec2FeatureExtractor"
UpperCAmelCase_ ="AutoTokenizer"
def __init__( self , _A , _A ) -> Dict:
super().__init__(_A , _A )
SCREAMING_SNAKE_CASE_ = self.feature_extractor
SCREAMING_SNAKE_CASE_ = False
@classmethod
def _UpperCamelCase ( cls , _A , **_A ) -> List[str]:
try:
return super().from_pretrained(_A , **_A )
except OSError:
warnings.warn(
F'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
''' include a `tokenizer_class` attribute is deprecated and will be '''
'''removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'''
''' attribute to either your `config.json` or `tokenizer_config.json` '''
'''file to suppress this warning: ''' , _A , )
            SCREAMING_SNAKE_CASE_ = Wav2Vec2FeatureExtractor.from_pretrained(_A , **_A )
            SCREAMING_SNAKE_CASE_ = Wav2Vec2CTCTokenizer.from_pretrained(_A , **_A )
return cls(feature_extractor=_A , tokenizer=_A )
def __call__( self , *_A , **_A ) -> Any:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_A , **_A )
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
SCREAMING_SNAKE_CASE_ = kwargs.pop('''raw_speech''' )
else:
SCREAMING_SNAKE_CASE_ = kwargs.pop('''audio''' , _A )
SCREAMING_SNAKE_CASE_ = kwargs.pop('''sampling_rate''' , _A )
SCREAMING_SNAKE_CASE_ = kwargs.pop('''text''' , _A )
if len(_A ) > 0:
SCREAMING_SNAKE_CASE_ = args[0]
SCREAMING_SNAKE_CASE_ = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
SCREAMING_SNAKE_CASE_ = self.feature_extractor(_A , *_A , sampling_rate=_A , **_A )
if text is not None:
SCREAMING_SNAKE_CASE_ = self.tokenizer(_A , **_A )
if text is None:
return inputs
elif audio is None:
return encodings
else:
SCREAMING_SNAKE_CASE_ = encodings['''input_ids''']
return inputs
def _UpperCamelCase ( self , *_A , **_A ) -> Union[str, Any]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*_A , **_A )
SCREAMING_SNAKE_CASE_ = kwargs.pop('''input_features''' , _A )
SCREAMING_SNAKE_CASE_ = kwargs.pop('''labels''' , _A )
if len(_A ) > 0:
SCREAMING_SNAKE_CASE_ = args[0]
SCREAMING_SNAKE_CASE_ = args[1:]
if input_features is not None:
SCREAMING_SNAKE_CASE_ = self.feature_extractor.pad(_A , *_A , **_A )
if labels is not None:
SCREAMING_SNAKE_CASE_ = self.tokenizer.pad(_A , **_A )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
SCREAMING_SNAKE_CASE_ = labels['''input_ids''']
return input_features
def _UpperCamelCase ( self , *_A , **_A ) -> Any:
return self.tokenizer.batch_decode(*_A , **_A )
def _UpperCamelCase ( self , *_A , **_A ) -> Optional[Any]:
return self.tokenizer.decode(*_A , **_A )
@contextmanager
def _UpperCamelCase ( self ) -> Optional[int]:
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your audio inputs, or in a separate call.''' )
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = self.tokenizer
yield
SCREAMING_SNAKE_CASE_ = self.feature_extractor
SCREAMING_SNAKE_CASE_ = False
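# --- Usage sketch (added; not part of the original file) ---
# A typical round trip with this processor; the checkpoint name is an
# assumption for illustration, and `speech_array` / `predicted_ids` are
# placeholders for your audio input and model output.
#
#   processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#   inputs = processor(audio=speech_array, sampling_rate=16_000, return_tensors="pt")
#   # ... run the acoustic model on inputs.input_values, argmax the logits ...
#   transcription = processor.batch_decode(predicted_ids)
#   # Labels can be prepared in the same call via the `text` argument, the
#   # supported replacement for the deprecated as_target_processor() above.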
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
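# --- Usage note (added; not part of the original file) ---
# Thanks to the _LazyModule indirection above, submodules are only imported on
# first attribute access, e.g.:
#
#   from transformers import EfficientFormerConfig, EfficientFormerModel  # needs torch
#   model = EfficientFormerModel(EfficientFormerConfig())
#
# When torch / TF / vision are missing, the corresponding names are simply left
# out of the import structure instead of failing at import time.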
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
__UpperCAmelCase = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures
class UpperCamelCase__ :
"""simple docstring"""
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

        self.thread_ts = None
@property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
@property
    def header(self) -> Dict:
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
@property
    def failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
F''' {self.time}.'''
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
@property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }
@property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)
@staticmethod
def _UpperCamelCase ( ) -> Any:
SCREAMING_SNAKE_CASE_ = [
{
'''type''': '''section''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''There was an issue running the tests.''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
'''url''': F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
]
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(_A )} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text='''There was an issue running the tests.''' , blocks=_A , )
    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"], blocks=self.payload, text=text, )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]

                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"], text=f"Results for {job}", blocks=blocks, thread_ts=self.thread_ts["ts"], )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}
def A__ ( __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = {}
if os.path.exists(__lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = os.listdir(__lowerCamelCase )
for file in files:
try:
with open(os.path.join(__lowerCamelCase, __lowerCamelCase ), encoding='''utf-8''' ) as f:
SCREAMING_SNAKE_CASE_ = f.read()
except UnicodeDecodeError as e:
raise ValueError(F'''Could not open {os.path.join(__lowerCamelCase, __lowerCamelCase )}.''' ) from e
return _artifact
def A__ ( ):
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , _A ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = name
SCREAMING_SNAKE_CASE_ = []
def __str__( self ) -> int:
return self.name
def _UpperCamelCase ( self , _A ) -> Tuple:
self.paths.append({'''name''': self.name, '''path''': path} )
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = filter(os.path.isdir, os.listdir() )
for directory in directories:
SCREAMING_SNAKE_CASE_ = directory
if artifact_name not in _available_artifacts:
SCREAMING_SNAKE_CASE_ = Artifact(__lowerCamelCase )
_available_artifacts[artifact_name].add_path(__lowerCamelCase )
return _available_artifacts
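
# Note: this script is meant to run on CI, where GITHUB_RUN_ID and
# CI_SLACK_CHANNEL_ID_DAILY are set in the environment; it raises a KeyError otherwise.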
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
message.post_reply()
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase):
'''simple docstring'''
_A = KandinskyVaaPriorPipeline
_A = ['prompt']
_A = ['prompt', 'negative_prompt']
_A = [
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
_A = False
@property
def _lowerCamelCase ( self :Optional[Any] ) -> Any:
return 3_2
@property
def _lowerCamelCase ( self :Optional[int] ) -> Tuple:
return 3_2
@property
def _lowerCamelCase ( self :Tuple ) -> Optional[Any]:
return self.time_input_dim
@property
def _lowerCamelCase ( self :List[str] ) -> List[Any]:
return self.time_input_dim * 4
@property
def _lowerCamelCase ( self :Optional[Any] ) -> int:
return 1_0_0
@property
def _lowerCamelCase ( self :Union[str, Any] ) -> List[Any]:
__UpperCamelCase : Dict = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def _lowerCamelCase ( self :List[Any] ) -> Union[str, Any]:
torch.manual_seed(0 )
__UpperCamelCase : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(_A )
@property
def _lowerCamelCase ( self :int ) -> Tuple:
torch.manual_seed(0 )
__UpperCamelCase : Union[str, Any] = {
"num_attention_heads": 2,
"attention_head_dim": 1_2,
"embedding_dim": self.text_embedder_hidden_size,
"num_layers": 1,
}
__UpperCamelCase : Dict = PriorTransformer(**_A )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
__UpperCamelCase : Union[str, Any] = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def _lowerCamelCase ( self :str ) -> Optional[int]:
torch.manual_seed(0 )
__UpperCamelCase : Optional[int] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=2_2_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1_4 , )
__UpperCamelCase : Any = CLIPVisionModelWithProjection(_A )
return model
@property
def _lowerCamelCase ( self :Dict ) -> Optional[int]:
__UpperCamelCase : Tuple = CLIPImageProcessor(
crop_size=2_2_4 , do_center_crop=_A , do_normalize=_A , do_resize=_A , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=2_2_4 , )
return image_processor
def _lowerCamelCase ( self :Tuple ) -> List[Any]:
__UpperCamelCase : List[str] = self.dummy_prior
__UpperCamelCase : Union[str, Any] = self.dummy_image_encoder
__UpperCamelCase : List[Any] = self.dummy_text_encoder
__UpperCamelCase : int = self.dummy_tokenizer
__UpperCamelCase : int = self.dummy_image_processor
__UpperCamelCase : Tuple = UnCLIPScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_0_0_0 , clip_sample=_A , clip_sample_range=10.0 , )
__UpperCamelCase : str = {
"prior": prior,
"image_encoder": image_encoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"scheduler": scheduler,
"image_processor": image_processor,
}
return components
def _lowerCamelCase ( self :Tuple , a :Dict , a :int=0 ) -> Any:
if str(_A ).startswith("mps" ):
__UpperCamelCase : Tuple = torch.manual_seed(_A )
else:
__UpperCamelCase : Any = torch.Generator(device=_A ).manual_seed(_A )
__UpperCamelCase : Union[str, Any] = {
"prompt": "horse",
"generator": generator,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def _lowerCamelCase ( self :int ) -> Optional[int]:
__UpperCamelCase : List[str] = "cpu"
__UpperCamelCase : Tuple = self.get_dummy_components()
__UpperCamelCase : Dict = self.pipeline_class(**_A )
__UpperCamelCase : str = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__UpperCamelCase : Optional[Any] = pipe(**self.get_dummy_inputs(_A ) )
__UpperCamelCase : Optional[Any] = output.image_embeds
__UpperCamelCase : Optional[int] = pipe(
**self.get_dummy_inputs(_A ) , return_dict=_A , )[0]
__UpperCamelCase : Any = image[0, -1_0:]
__UpperCamelCase : List[str] = image_from_tuple[0, -1_0:]
assert image.shape == (1, 3_2)
__UpperCamelCase : Optional[int] = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def _lowerCamelCase ( self :Dict ) -> str:
__UpperCamelCase : Optional[int] = torch_device == "cpu"
__UpperCamelCase : Optional[Any] = True
__UpperCamelCase : List[str] = False
self._test_inference_batch_single_identical(
test_max_difference=_A , relax_max_difference=_A , test_mean_pixel_difference=_A , )
@skip_mps
def _lowerCamelCase ( self :Optional[Any] ) -> int:
__UpperCamelCase : Union[str, Any] = torch_device == "cpu"
__UpperCamelCase : List[Any] = False
self._test_attention_slicing_forward_pass(
test_max_difference=_A , test_mean_pixel_difference=_A , )
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
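
# Each index into DIRECTIONS doubles as an action id: the search below stores it in
# the action map, and the final path is rebuilt by stepping backwards through those ids.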
def search(grid, init, goal, cost, heuristic):
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
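    # Each action-map entry is an index into DIRECTIONS recording the move used to
    # first reach that cell (0 is also the initial fill value for unvisited cells).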
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
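    # Quick illustrative check (values chosen here, not from the original file):
    # {4, 5} sums to 9, so this prints True.
    print(is_sum_subset([3, 34, 4, 12, 5, 2], 9))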
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    # copy the original matrix and vector into the augmented matrix
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]

        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(y_list: list[int]) -> Callable[[int], int]:
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)

        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]

    ret: int = 0
    poly: Callable[[int], int]
    x_val: int

    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1

        ret += poly(x_val)

    return ret
if __name__ == "__main__":
print(F"""{solution() = }""")
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)

    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")

    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
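# Example invocation (script and file names illustrative; the checkpoint file must be
# one of ACCEPTABLE_CHECKPOINTS):
#   python convert_visual_bert_checkpoint.py vqa_pre_trained.th ./visualbert-vqa-pretrained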
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
__UpperCAmelCase = logging.getLogger(__name__)
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ ="summarization"
UpperCAmelCase_ =["loss"]
UpperCAmelCase_ =ROUGE_KEYS
UpperCAmelCase_ ="rouge2"
def __init__( self , _A , **_A ) -> Tuple:
if hparams.sortish_sampler and hparams.gpus > 1:
SCREAMING_SNAKE_CASE_ = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('''Dynamic Batch size does not work for multi-gpu training''' )
if hparams.sortish_sampler:
raise ValueError('''--sortish_sampler and --max_tokens_per_batch may not be used simultaneously''' )
super().__init__(_A , num_labels=_A , mode=self.mode , **_A )
use_task_specific_params(self.model , '''summarization''' )
save_git_info(self.hparams.output_dir )
SCREAMING_SNAKE_CASE_ = Path(self.output_dir ) / '''metrics.json'''
SCREAMING_SNAKE_CASE_ = Path(self.output_dir ) / '''hparams.pkl'''
pickle_save(self.hparams , self.hparams_save_path )
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = defaultdict(_A )
SCREAMING_SNAKE_CASE_ = self.config.model_type
SCREAMING_SNAKE_CASE_ = self.config.tgt_vocab_size if self.model_type == '''fsmt''' else self.config.vocab_size
SCREAMING_SNAKE_CASE_ = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
SCREAMING_SNAKE_CASE_ = {
'''train''': self.hparams.n_train,
'''val''': self.hparams.n_val,
'''test''': self.hparams.n_test,
}
SCREAMING_SNAKE_CASE_ = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
SCREAMING_SNAKE_CASE_ = {
'''train''': self.hparams.max_target_length,
'''val''': self.hparams.val_max_target_length,
'''test''': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F'''target_lens: {self.target_lens}'''
assert self.target_lens["train"] <= self.target_lens["test"], F'''target_lens: {self.target_lens}'''
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
SCREAMING_SNAKE_CASE_ = get_git_info()['''repo_sha''']
SCREAMING_SNAKE_CASE_ = hparams.num_workers
SCREAMING_SNAKE_CASE_ = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , _A ):
SCREAMING_SNAKE_CASE_ = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
SCREAMING_SNAKE_CASE_ = self.decoder_start_token_id
SCREAMING_SNAKE_CASE_ = (
SeqaSeqDataset if hasattr(self.tokenizer , '''prepare_seq2seq_batch''' ) else LegacySeqaSeqDataset
)
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
SCREAMING_SNAKE_CASE_ = self.hparams.eval_max_gen_length
else:
SCREAMING_SNAKE_CASE_ = self.model.config.max_length
SCREAMING_SNAKE_CASE_ = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def _UpperCamelCase ( self , _A ) -> Dict[str, List[str]]:
SCREAMING_SNAKE_CASE_ = {
k: self.tokenizer.batch_decode(v.tolist() ) if '''mask''' not in k else v.shape for k, v in batch.items()
}
save_json(_A , Path(self.output_dir ) / '''text_batch.json''' )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / '''tok_batch.json''' )
SCREAMING_SNAKE_CASE_ = True
return readable_batch
def _UpperCamelCase ( self , _A , **_A ) -> List[str]:
return self.model(_A , **_A )
def _UpperCamelCase ( self , _A ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = self.tokenizer.batch_decode(
_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A )
return lmap(str.strip , _A )
def _UpperCamelCase ( self , _A ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.tokenizer.pad_token_id
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = batch['''input_ids'''], batch['''attention_mask''']
SCREAMING_SNAKE_CASE_ = batch['''labels''']
if isinstance(self.model , _A ):
SCREAMING_SNAKE_CASE_ = self.model._shift_right(_A )
else:
SCREAMING_SNAKE_CASE_ = shift_tokens_right(_A , _A )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
SCREAMING_SNAKE_CASE_ = decoder_input_ids
self.save_readable_batch(_A )
SCREAMING_SNAKE_CASE_ = self(_A , attention_mask=_A , decoder_input_ids=_A , use_cache=_A )
SCREAMING_SNAKE_CASE_ = outputs['''logits''']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
SCREAMING_SNAKE_CASE_ = nn.CrossEntropyLoss(ignore_index=_A )
assert lm_logits.shape[-1] == self.vocab_size
SCREAMING_SNAKE_CASE_ = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
SCREAMING_SNAKE_CASE_ = nn.functional.log_softmax(_A , dim=-1 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = label_smoothed_nll_loss(
_A , _A , self.hparams.label_smoothing , ignore_index=_A )
return (loss,)
@property
def _UpperCamelCase ( self ) -> int:
return self.tokenizer.pad_token_id
def _UpperCamelCase ( self , _A , _A ) -> Dict:
SCREAMING_SNAKE_CASE_ = self._step(_A )
SCREAMING_SNAKE_CASE_ = dict(zip(self.loss_names , _A ) )
# tokens per batch
SCREAMING_SNAKE_CASE_ = batch['''input_ids'''].ne(self.pad ).sum() + batch['''labels'''].ne(self.pad ).sum()
SCREAMING_SNAKE_CASE_ = batch['''input_ids'''].shape[0]
SCREAMING_SNAKE_CASE_ = batch['''input_ids'''].eq(self.pad ).sum()
SCREAMING_SNAKE_CASE_ = batch['''input_ids'''].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def _UpperCamelCase ( self , _A , _A ) -> Dict:
return self._generative_step(_A )
def _UpperCamelCase ( self , _A , _A="val" ) -> Dict:
self.step_count += 1
SCREAMING_SNAKE_CASE_ = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
SCREAMING_SNAKE_CASE_ = losses['''loss''']
SCREAMING_SNAKE_CASE_ = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['''gen_time''', '''gen_len''']
}
SCREAMING_SNAKE_CASE_ = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
SCREAMING_SNAKE_CASE_ = torch.tensor(_A ).type_as(_A )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(_A )
SCREAMING_SNAKE_CASE_ = {F'''{prefix}_avg_{k}''': x for k, x in losses.items()}
SCREAMING_SNAKE_CASE_ = self.step_count
self.metrics[prefix].append(_A ) # callback writes this to self.metrics_save_path
SCREAMING_SNAKE_CASE_ = flatten_list([x['''preds'''] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F'''{prefix}_loss''': loss,
F'''{prefix}_{self.val_metric}''': metric_tensor,
}
def _UpperCamelCase ( self , _A , _A ) -> Dict:
return calculate_rouge(_A , _A )
def _UpperCamelCase ( self , _A ) -> dict:
SCREAMING_SNAKE_CASE_ = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
SCREAMING_SNAKE_CASE_ = self.model.generate(
batch['''input_ids'''] , attention_mask=batch['''attention_mask'''] , use_cache=_A , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
SCREAMING_SNAKE_CASE_ = (time.time() - ta) / batch['''input_ids'''].shape[0]
SCREAMING_SNAKE_CASE_ = self.ids_to_clean_text(_A )
SCREAMING_SNAKE_CASE_ = self.ids_to_clean_text(batch['''labels'''] )
SCREAMING_SNAKE_CASE_ = self._step(_A )
SCREAMING_SNAKE_CASE_ = dict(zip(self.loss_names , _A ) )
SCREAMING_SNAKE_CASE_ = self.calc_generative_metrics(_A , _A )
SCREAMING_SNAKE_CASE_ = np.mean(lmap(_A , _A ) )
base_metrics.update(gen_time=_A , gen_len=_A , preds=_A , target=_A , **_A )
return base_metrics
def _UpperCamelCase ( self , _A , _A ) -> Any:
return self._generative_step(_A )
def _UpperCamelCase ( self , _A ) -> Optional[int]:
return self.validation_epoch_end(_A , prefix='''test''' )
def _UpperCamelCase ( self , _A ) -> SeqaSeqDataset:
SCREAMING_SNAKE_CASE_ = self.n_obs[type_path]
SCREAMING_SNAKE_CASE_ = self.target_lens[type_path]
SCREAMING_SNAKE_CASE_ = self.dataset_class(
self.tokenizer , type_path=_A , n_obs=_A , max_target_length=_A , **self.dataset_kwargs , )
return dataset
def _UpperCamelCase ( self , _A , _A , _A = False ) -> DataLoader:
SCREAMING_SNAKE_CASE_ = self.get_dataset(_A )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
SCREAMING_SNAKE_CASE_ = dataset.make_sortish_sampler(_A , distributed=self.hparams.gpus > 1 )
return DataLoader(
_A , batch_size=_A , collate_fn=dataset.collate_fn , shuffle=_A , num_workers=self.num_workers , sampler=_A , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
SCREAMING_SNAKE_CASE_ = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
_A , batch_sampler=_A , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
_A , batch_size=_A , collate_fn=dataset.collate_fn , shuffle=_A , num_workers=self.num_workers , sampler=_A , )
def _UpperCamelCase ( self ) -> DataLoader:
SCREAMING_SNAKE_CASE_ = self.get_dataloader('''train''' , batch_size=self.hparams.train_batch_size , shuffle=_A )
return dataloader
def _UpperCamelCase ( self ) -> DataLoader:
return self.get_dataloader('''val''' , batch_size=self.hparams.eval_batch_size )
def _UpperCamelCase ( self ) -> DataLoader:
return self.get_dataloader('''test''' , batch_size=self.hparams.eval_batch_size )
@staticmethod
def _UpperCamelCase ( _A , _A ) -> Dict:
BaseTransformer.add_model_specific_args(_A , _A )
add_generic_args(_A , _A )
parser.add_argument(
'''--max_source_length''' , default=1024 , type=_A , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--max_target_length''' , default=56 , type=_A , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--val_max_target_length''' , default=142 , type=_A , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--test_max_target_length''' , default=142 , type=_A , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument('''--freeze_encoder''' , action='''store_true''' )
parser.add_argument('''--freeze_embeds''' , action='''store_true''' )
parser.add_argument('''--sortish_sampler''' , action='''store_true''' , default=_A )
parser.add_argument('''--overwrite_output_dir''' , action='''store_true''' , default=_A )
parser.add_argument('''--max_tokens_per_batch''' , type=_A , default=_A )
parser.add_argument('''--logger_name''' , type=_A , choices=['''default''', '''wandb''', '''wandb_shared'''] , default='''default''' )
parser.add_argument('''--n_train''' , type=_A , default=-1 , required=_A , help='''# examples. -1 means use all.''' )
parser.add_argument('''--n_val''' , type=_A , default=500 , required=_A , help='''# examples. -1 means use all.''' )
parser.add_argument('''--n_test''' , type=_A , default=-1 , required=_A , help='''# examples. -1 means use all.''' )
parser.add_argument(
'''--task''' , type=_A , default='''summarization''' , required=_A , help='''# examples. -1 means use all.''' )
parser.add_argument('''--label_smoothing''' , type=_A , default=0.0 , required=_A )
parser.add_argument('''--src_lang''' , type=_A , default='''''' , required=_A )
parser.add_argument('''--tgt_lang''' , type=_A , default='''''' , required=_A )
parser.add_argument('''--eval_beams''' , type=_A , default=_A , required=_A )
parser.add_argument(
'''--val_metric''' , type=_A , default=_A , required=_A , choices=['''bleu''', '''rouge2''', '''loss''', None] )
parser.add_argument('''--eval_max_gen_length''' , type=_A , default=_A , help='''never generate more than n tokens''' )
parser.add_argument('''--save_top_k''' , type=_A , default=1 , required=_A , help='''How many checkpoints to save''' )
parser.add_argument(
'''--early_stopping_patience''' , type=_A , default=-1 , required=_A , help=(
'''-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'''
''' val_check_interval will effect it.'''
) , )
return parser
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ ="translation"
UpperCAmelCase_ =["loss"]
UpperCAmelCase_ =["bleu"]
UpperCAmelCase_ ="bleu"
def __init__( self , _A , **_A ) -> Optional[int]:
super().__init__(_A , **_A )
SCREAMING_SNAKE_CASE_ = hparams.src_lang
SCREAMING_SNAKE_CASE_ = hparams.tgt_lang
def _UpperCamelCase ( self , _A , _A ) -> dict:
return calculate_bleu(_A , _A )
def A__ ( __lowerCamelCase, __lowerCamelCase=None ):
Path(args.output_dir ).mkdir(exist_ok=__lowerCamelCase )
check_output_dir(__lowerCamelCase, expected_items=3 )
if model is None:
if "summarization" in args.task:
SCREAMING_SNAKE_CASE_ = SummarizationModule(__lowerCamelCase )
else:
SCREAMING_SNAKE_CASE_ = TranslationModule(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith('''/tmp''' )
or str(args.output_dir ).startswith('''/var''' )
):
SCREAMING_SNAKE_CASE_ = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
SCREAMING_SNAKE_CASE_ = os.environ.get('''WANDB_PROJECT''', __lowerCamelCase )
SCREAMING_SNAKE_CASE_ = WandbLogger(name=model.output_dir.name, project=__lowerCamelCase )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
SCREAMING_SNAKE_CASE_ = WandbLogger(name=model.output_dir.name, project=F'''hf_{dataset}''' )
if args.early_stopping_patience >= 0:
SCREAMING_SNAKE_CASE_ = get_early_stopping_callback(model.val_metric, args.early_stopping_patience )
else:
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = args.val_metric == '''loss'''
SCREAMING_SNAKE_CASE_ = generic_train(
__lowerCamelCase, __lowerCamelCase, logging_callback=SeqaSeqLoggingCallback(), checkpoint_callback=get_checkpoint_callback(
args.output_dir, model.val_metric, args.save_top_k, __lowerCamelCase ), early_stopping_callback=__lowerCamelCase, logger=__lowerCamelCase, )
pickle_save(model.hparams, model.output_dir / '''hparams.pkl''' )
if not args.do_predict:
return model
SCREAMING_SNAKE_CASE_ = ''''''
SCREAMING_SNAKE_CASE_ = sorted(glob.glob(os.path.join(args.output_dir, '''*.ckpt''' ), recursive=__lowerCamelCase ) )
if checkpoints:
SCREAMING_SNAKE_CASE_ = checkpoints[-1]
SCREAMING_SNAKE_CASE_ = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
__UpperCAmelCase = pl.Trainer.add_argparse_args(parser)
__UpperCAmelCase = SummarizationModule.add_model_specific_args(parser, os.getcwd())
__UpperCAmelCase = parser.parse_args()
main(args)
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def _A ( _lowerCAmelCase="ro" , _lowerCAmelCase="en" , _lowerCAmelCase="wmt16" , _lowerCAmelCase=None ):
"""simple docstring"""
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('run pip install datasets' )
__lowercase =f"""{src_lang}-{tgt_lang}"""
print(f"""Converting {dataset}-{pair}""" )
__lowercase =datasets.load_dataset(__lowerCamelCase , __lowerCamelCase )
if save_dir is None:
__lowercase =f"""{dataset}-{pair}"""
__lowercase =Path(__lowerCamelCase )
save_dir.mkdir(exist_ok=__lowerCamelCase )
for split in ds.keys():
print(f"""Splitting {split} with {ds[split].num_rows} records""" )
# to save to val.source, val.target like summary datasets
__lowercase ='val' if split == 'validation' else split
__lowercase =save_dir.joinpath(f"""{fn}.source""" )
__lowercase =save_dir.joinpath(f"""{fn}.target""" )
__lowercase =src_path.open('w+' )
__lowercase =tgt_path.open('w+' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
__lowercase =x['translation']
src_fp.write(ex[src_lang] + '\n' )
tgt_fp.write(ex[tgt_lang] + '\n' )
print(f"""Saved {dataset} dataset to {save_dir}""" )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
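# Example invocation via fire, which maps the function arguments to CLI flags
# (script name illustrative):
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en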
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
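# With the _LazyModule registered above, importing e.g. LayoutLMv2Model from this
# package only triggers the heavy torch/vision imports on first attribute access.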
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Check if a number is a perfect square."""
    sq: int = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Return the sum of three fractions in lowest form."""
    top: int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom: int = x_den * y_den * z_den
    hcf: int = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(F'{solution() = }')
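    # solution() defaults to order=35 and can take a while; a smaller order such as
    # solution(10) exercises the same code paths as a quicker smoke test.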
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
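    # Illustrative check (a classic example for this problem): 1-day passes on days
    # 1 and 20 plus a 7-day pass covering days 4-8 cost 2 + 7 + 2 = 11.
    print(mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # prints 11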
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class A :
'''simple docstring'''
def __init__( self : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : str=13 , __lowerCAmelCase : int=7 , __lowerCAmelCase : int=6 , __lowerCAmelCase : Optional[Any]=17 , __lowerCAmelCase : Tuple=23 , __lowerCAmelCase : Tuple=11 , __lowerCAmelCase : List[Any]=True , ) -> Optional[Any]:
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = act_dim
A__ = state_dim
A__ = hidden_size
A__ = max_length
A__ = is_training
def a_ ( self : List[Any] ) -> Any:
"""simple docstring"""
A__ = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
A__ = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
A__ = floats_tensor((self.batch_size, self.seq_length, 1) )
A__ = floats_tensor((self.batch_size, self.seq_length, 1) )
A__ = ids_tensor((self.batch_size, self.seq_length) , vocab_size=10_00 )
A__ = random_attention_mask((self.batch_size, self.seq_length) )
A__ = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def a_ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def a_ ( self : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , ) -> List[Any]:
"""simple docstring"""
A__ = DecisionTransformerModel(config=_A )
model.to(_A )
model.eval()
A__ = model(_A , _A , _A , _A , _A , _A )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions
def a_ ( self : Any ) -> Optional[int]:
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
(
(
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) ,
) = config_and_inputs
A__ = {
"""states""": states,
"""actions""": actions,
"""rewards""": rewards,
"""returns_to_go""": returns_to_go,
"""timesteps""": timesteps,
"""attention_mask""": attention_mask,
}
return config, inputs_dict
@require_torch
class A (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : List[Any] = (DecisionTransformerModel,) if is_torch_available() else ()
__lowerCamelCase : Tuple = ()
__lowerCamelCase : Optional[Any] = {'''feature-extraction''': DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
__lowerCamelCase : Dict = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
__lowerCamelCase : Tuple = False
__lowerCamelCase : Union[str, Any] = False
__lowerCamelCase : Optional[Any] = False
__lowerCamelCase : Any = False
__lowerCamelCase : Union[str, Any] = False
__lowerCamelCase : Dict = False
__lowerCamelCase : List[str] = False
__lowerCamelCase : str = False
__lowerCamelCase : Union[str, Any] = False
def a_ ( self : str ) -> Dict:
"""simple docstring"""
A__ = DecisionTransformerModelTester(self )
A__ = ConfigTester(self , config_class=_A , hidden_size=37 )
def a_ ( self : Any ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def a_ ( self : Tuple ) -> str:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
@slow
def a_ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = DecisionTransformerModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def a_ ( self : Dict ) -> Tuple:
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(_A )
A__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = [
"""states""",
"""actions""",
"""rewards""",
"""returns_to_go""",
"""timesteps""",
"""attention_mask""",
]
self.assertListEqual(arg_names[: len(_A )] , _A )
@require_torch
class A (unittest.TestCase ):
'''simple docstring'''
@slow
def a_ ( self : int ) -> int:
"""simple docstring"""
A__ = 2 # number of steps of autoregressive prediction we will perform
A__ = 10 # defined by the RL environment, may be normalized
A__ = DecisionTransformerModel.from_pretrained("""edbeeching/decision-transformer-gym-hopper-expert""" )
A__ = model.to(_A )
A__ = model.config
torch.manual_seed(0 )
A__ = torch.randn(1 , 1 , config.state_dim ).to(device=_A , dtype=torch.floataa ) # env.reset()
A__ = torch.tensor(
[[0.2_4_2_7_9_3, -0.2_8_6_9_3_0_7_4, 0.8_7_4_2_6_1_3], [0.6_7_8_1_5_2_7_4, -0.0_8_1_0_1_0_8_5, -0.1_2_9_5_2_1_4_7]] , device=_A )
A__ = torch.tensor(_A , device=_A , dtype=torch.floataa ).reshape(1 , 1 , 1 )
A__ = state
A__ = torch.zeros(1 , 0 , config.act_dim , device=_A , dtype=torch.floataa )
A__ = torch.zeros(1 , 0 , device=_A , dtype=torch.floataa )
A__ = torch.tensor(0 , device=_A , dtype=torch.long ).reshape(1 , 1 )
for step in range(_A ):
A__ = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=_A )] , dim=1 )
A__ = torch.cat([rewards, torch.zeros(1 , 1 , device=_A )] , dim=1 )
A__ = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
A__ , A__ , A__ = model(
states=_A , actions=_A , rewards=_A , returns_to_go=_A , timesteps=_A , attention_mask=_A , return_dict=_A , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
A__ , A__ , A__ , A__ = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=_A , dtype=torch.floataa ),
1.0,
False,
{},
)
A__ = action_pred[0, -1]
A__ = torch.cat([states, state] , dim=1 )
A__ = returns_to_go[0, -1] - reward
A__ = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
A__ = torch.cat(
[timesteps, torch.ones((1, 1) , device=_A , dtype=torch.long ) * (step + 1)] , dim=1 )
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
__UpperCAmelCase = logging.get_logger(__name__)
enable_full_determinism()
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ =UNetaDModel
UpperCAmelCase_ ="sample"
@property
def _UpperCamelCase ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = (32, 32)
SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
SCREAMING_SNAKE_CASE_ = torch.tensor([10] ).to(_A )
return {"sample": noise, "timestep": time_step}
@property
def _UpperCamelCase ( self ) -> List[str]:
return (3, 32, 32)
@property
def _UpperCamelCase ( self ) -> Tuple:
return (3, 32, 32)
def _UpperCamelCase ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = {
'''block_out_channels''': (32, 64),
'''down_block_types''': ('''DownBlock2D''', '''AttnDownBlock2D'''),
'''up_block_types''': ('''AttnUpBlock2D''', '''UpBlock2D'''),
'''attention_head_dim''': 3,
'''out_channels''': 3,
'''in_channels''': 3,
'''layers_per_block''': 2,
'''sample_size''': 32,
}
SCREAMING_SNAKE_CASE_ = self.dummy_input
return init_dict, inputs_dict
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ =UNetaDModel
UpperCAmelCase_ ="sample"
@property
def _UpperCamelCase ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = (32, 32)
SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
SCREAMING_SNAKE_CASE_ = torch.tensor([10] ).to(_A )
return {"sample": noise, "timestep": time_step}
@property
def _UpperCamelCase ( self ) -> Tuple:
return (4, 32, 32)
@property
def _UpperCamelCase ( self ) -> Tuple:
return (4, 32, 32)
def _UpperCamelCase ( self ) -> Tuple:
SCREAMING_SNAKE_CASE_ = {
'''sample_size''': 32,
'''in_channels''': 4,
'''out_channels''': 4,
'''layers_per_block''': 2,
'''block_out_channels''': (32, 64),
'''attention_head_dim''': 32,
'''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''),
'''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''),
}
SCREAMING_SNAKE_CASE_ = self.dummy_input
return init_dict, inputs_dict
def _UpperCamelCase ( self ) -> Tuple:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(_A )
SCREAMING_SNAKE_CASE_ = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
def _UpperCamelCase ( self ) -> Dict:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
model.to(_A )
SCREAMING_SNAKE_CASE_ = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
def _UpperCamelCase ( self ) -> Dict:
# by defautl model loading will use accelerate as `low_cpu_mem_usage=True`
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
model_accelerate.to(_A )
model_accelerate.eval()
SCREAMING_SNAKE_CASE_ = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
SCREAMING_SNAKE_CASE_ = noise.to(_A )
SCREAMING_SNAKE_CASE_ = torch.tensor([10] * noise.shape[0] ).to(_A )
SCREAMING_SNAKE_CASE_ = model_accelerate(_A , _A )['''sample''']
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained(
'''fusing/unet-ldm-dummy-update''' , output_loading_info=_A , low_cpu_mem_usage=_A )
model_normal_load.to(_A )
model_normal_load.eval()
SCREAMING_SNAKE_CASE_ = model_normal_load(_A , _A )['''sample''']
assert torch_all_close(_A , _A , rtol=1E-3 )
def _UpperCamelCase ( self ) -> List[str]:
SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' )
model.eval()
model.to(_A )
SCREAMING_SNAKE_CASE_ = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
SCREAMING_SNAKE_CASE_ = noise.to(_A )
SCREAMING_SNAKE_CASE_ = torch.tensor([10] * noise.shape[0] ).to(_A )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(_A , _A ).sample
SCREAMING_SNAKE_CASE_ = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
SCREAMING_SNAKE_CASE_ = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] )
# fmt: on
self.assertTrue(torch_all_close(_A , _A , rtol=1E-3 ) )
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ =UNetaDModel
UpperCAmelCase_ ="sample"
@property
def _UpperCamelCase ( self , _A=(32, 32) ) -> int:
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
SCREAMING_SNAKE_CASE_ = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=_A )
return {"sample": noise, "timestep": time_step}
@property
def _UpperCamelCase ( self ) -> List[str]:
return (3, 32, 32)
@property
def _UpperCamelCase ( self ) -> List[Any]:
return (3, 32, 32)
def _UpperCamelCase ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = {
'''block_out_channels''': [32, 64, 64, 64],
'''in_channels''': 3,
'''layers_per_block''': 1,
'''out_channels''': 3,
'''time_embedding_type''': '''fourier''',
'''norm_eps''': 1E-6,
'''mid_block_scale_factor''': math.sqrt(2.0 ),
'''norm_num_groups''': None,
'''down_block_types''': [
'''SkipDownBlock2D''',
'''AttnSkipDownBlock2D''',
'''SkipDownBlock2D''',
'''SkipDownBlock2D''',
],
'''up_block_types''': [
'''SkipUpBlock2D''',
'''SkipUpBlock2D''',
'''AttnSkipUpBlock2D''',
'''SkipUpBlock2D''',
],
}
SCREAMING_SNAKE_CASE_ = self.dummy_input
return init_dict, inputs_dict
@slow
def _UpperCamelCase ( self ) -> Tuple:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(_A )
SCREAMING_SNAKE_CASE_ = self.dummy_input
SCREAMING_SNAKE_CASE_ = floats_tensor((4, 3) + (256, 256) ).to(_A )
SCREAMING_SNAKE_CASE_ = noise
SCREAMING_SNAKE_CASE_ = model(**_A )
assert image is not None, "Make sure output is not None"
@slow
def _UpperCamelCase ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' )
model.to(_A )
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = (256, 256)
SCREAMING_SNAKE_CASE_ = torch.ones((batch_size, num_channels) + sizes ).to(_A )
SCREAMING_SNAKE_CASE_ = torch.tensor(batch_size * [1E-4] ).to(_A )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(_A , _A ).sample
SCREAMING_SNAKE_CASE_ = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
        SCREAMING_SNAKE_CASE_ = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608] )
# fmt: on
self.assertTrue(torch_all_close(_A , _A , rtol=1E-2 ) )
def _UpperCamelCase ( self ) -> int:
SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' )
model.to(_A )
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = (32, 32)
SCREAMING_SNAKE_CASE_ = torch.ones((batch_size, num_channels) + sizes ).to(_A )
SCREAMING_SNAKE_CASE_ = torch.tensor(batch_size * [1E-4] ).to(_A )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(_A , _A ).sample
SCREAMING_SNAKE_CASE_ = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
SCREAMING_SNAKE_CASE_ = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] )
# fmt: on
self.assertTrue(torch_all_close(_A , _A , rtol=1E-2 ) )
def _UpperCamelCase ( self ) -> Dict:
# not required for this model
pass
| 299
| 0
|
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_params(module):
    for param in module.parameters():
        param.requires_grad = False
def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a Colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations." )
    return device
def show_image(image):
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()
def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
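# A minimal usage sketch of the helpers above (assumes a standard PyTorch
# runtime; the chosen device and the printed time will vary by machine):
#
#     device = get_device()
#     print(f"[{get_timestamp()}] running on: {device}")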
| 148
|
def A__ ( __lowerCamelCase = 10_00 ):
SCREAMING_SNAKE_CASE_ = 2**power
SCREAMING_SNAKE_CASE_ = 0
while n:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = r + n % 10, n // 10
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
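# A one-line cross-check of the same computation via string conversion -- a
# verification sketch (assuming the function above is importable as `solution`,
# as the main block suggests), not the original implementation:
#
#     assert solution(1000) == sum(int(digit) for digit in str(2**1000))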
| 299
| 0
|
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_A : Tuple = '''\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'''
_A : Optional[Any] = '''\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'''
_A : Tuple = '''\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
'''simple docstring'''
def a ( self : int ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , )
def a ( self : Dict , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any = 1 , SCREAMING_SNAKE_CASE__ : Union[str, Any] = 4 , ) -> Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=_A , hypotheses=_A , min_len=_A , max_len=_A )
}
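# A hedged usage sketch: the metric wraps NLTK's `gleu_score.corpus_gleu`
# directly, so the same value can be reproduced without the datasets wrapper:
#
#     score = gleu_score.corpus_gleu(
#         list_of_references=[[["the", "cat", "sat"]]],
#         hypotheses=[["the", "cat", "sat"]],
#         min_len=1,
#         max_len=4,
#     )  # == 1.0 for an exact match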
| 229
|
from ..utils import DummyObject, requires_backends
class UpperCamelCase__ ( metaclass=__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ =["torch", "scipy"]
def __init__( self , *_A , **_A ) -> Tuple:
requires_backends(self , ['''torch''', '''scipy'''] )
@classmethod
def _UpperCamelCase ( cls , *_A , **_A ) -> Any:
requires_backends(cls , ['''torch''', '''scipy'''] )
@classmethod
def _UpperCamelCase ( cls , *_A , **_A ) -> Tuple:
requires_backends(cls , ['''torch''', '''scipy'''] )
| 299
| 0
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_a = 42
@flax_register_to_config
class __lowerCAmelCase ( nn.Module , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
_a = 32
_a = 4
_a = 4
_a = (
"""CrossAttnDownBlock2D""",
"""CrossAttnDownBlock2D""",
"""CrossAttnDownBlock2D""",
"""DownBlock2D""",
)
_a = ("""UpBlock2D""", """CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""")
_a = False
_a = (320, 640, 1_280, 1_280)
_a = 2
_a = 8
_a = None
_a = 1_280
_a = 0.0
_a = False
_a = jnp.floataa
_a = True
_a = 0
_a = False
def A__ ( self , lowerCAmelCase ) -> FrozenDict:
'''simple docstring'''
_lowercase =(1, self.in_channels, self.sample_size, self.sample_size)
_lowercase =jnp.zeros(_A , dtype=jnp.floataa )
_lowercase =jnp.ones((1,) , dtype=jnp.intaa )
_lowercase =jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
_lowercase , _lowercase =jax.random.split(_A )
_lowercase ={'params': params_rng, 'dropout': dropout_rng}
return self.init(_A , _A , _A , _A )["params"]
def A__ ( self ) -> List[str]:
'''simple docstring'''
_lowercase =self.block_out_channels
_lowercase =block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
_lowercase =self.num_attention_heads or self.attention_head_dim
# input
_lowercase =nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
_lowercase =FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
_lowercase =FlaxTimestepEmbedding(_A , dtype=self.dtype )
_lowercase =self.only_cross_attention
if isinstance(_A , _A ):
_lowercase =(only_cross_attention,) * len(self.down_block_types )
if isinstance(_A , _A ):
_lowercase =(num_attention_heads,) * len(self.down_block_types )
# down
_lowercase =[]
_lowercase =block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
_lowercase =output_channel
_lowercase =block_out_channels[i]
_lowercase =i == len(_A ) - 1
if down_block_type == "CrossAttnDownBlock2D":
_lowercase =FlaxCrossAttnDownBlockaD(
in_channels=_A , out_channels=_A , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
_lowercase =FlaxDownBlockaD(
in_channels=_A , out_channels=_A , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_A )
_lowercase =down_blocks
# mid
_lowercase =FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
_lowercase =[]
_lowercase =list(reversed(_A ) )
_lowercase =list(reversed(_A ) )
_lowercase =list(reversed(_A ) )
_lowercase =reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
_lowercase =output_channel
_lowercase =reversed_block_out_channels[i]
_lowercase =reversed_block_out_channels[min(i + 1 , len(_A ) - 1 )]
_lowercase =i == len(_A ) - 1
if up_block_type == "CrossAttnUpBlock2D":
_lowercase =FlaxCrossAttnUpBlockaD(
in_channels=_A , out_channels=_A , prev_output_channel=_A , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
_lowercase =FlaxUpBlockaD(
in_channels=_A , out_channels=_A , prev_output_channel=_A , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(_A )
_lowercase =output_channel
_lowercase =up_blocks
# out
_lowercase =nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
_lowercase =nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase = True , lowerCAmelCase = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]:
'''simple docstring'''
if not isinstance(_A , jnp.ndarray ):
_lowercase =jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(_A , jnp.ndarray ) and len(timesteps.shape ) == 0:
_lowercase =timesteps.astype(dtype=jnp.floataa )
_lowercase =jnp.expand_dims(_A , 0 )
_lowercase =self.time_proj(_A )
_lowercase =self.time_embedding(_A )
# 2. pre-process
_lowercase =jnp.transpose(_A , (0, 2, 3, 1) )
_lowercase =self.conv_in(_A )
# 3. down
_lowercase =(sample,)
for down_block in self.down_blocks:
if isinstance(_A , _A ):
_lowercase , _lowercase =down_block(_A , _A , _A , deterministic=not train )
else:
_lowercase , _lowercase =down_block(_A , _A , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
_lowercase =()
for down_block_res_sample, down_block_additional_residual in zip(
_A , _A ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
_lowercase =new_down_block_res_samples
# 4. mid
_lowercase =self.mid_block(_A , _A , _A , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
_lowercase =down_block_res_samples[-(self.layers_per_block + 1) :]
_lowercase =down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(_A , _A ):
_lowercase =up_block(
_A , temb=_A , encoder_hidden_states=_A , res_hidden_states_tuple=_A , deterministic=not train , )
else:
_lowercase =up_block(_A , temb=_A , res_hidden_states_tuple=_A , deterministic=not train )
# 6. post-process
_lowercase =self.conv_norm_out(_A )
_lowercase =nn.silu(_A )
_lowercase =self.conv_out(_A )
_lowercase =jnp.transpose(_A , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=_A )
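# A small layout sketch (not part of the model): inputs arrive as NCHW and are
# transposed to NHWC before the Flax convolutions above, then transposed back
# at the end of `__call__`.
#
#     import jax.numpy as jnp
#     sample_nchw = jnp.zeros((1, 4, 64, 64))                 # (batch, channels, h, w)
#     sample_nhwc = jnp.transpose(sample_nchw, (0, 2, 3, 1))  # (batch, h, w, channels)
#     assert sample_nhwc.shape == (1, 64, 64, 4)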
| 205
|
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _A , _A=3 , _A=32 , _A=3 , _A=10 , _A=[10, 20, 30, 40] , _A=[1, 1, 2, 1] , _A=True , _A=True , _A="relu" , _A=3 , _A=None , ) -> Tuple:
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = embeddings_size
SCREAMING_SNAKE_CASE_ = hidden_sizes
SCREAMING_SNAKE_CASE_ = depths
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = num_labels
SCREAMING_SNAKE_CASE_ = scope
SCREAMING_SNAKE_CASE_ = len(_A )
def _UpperCamelCase ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ = self.get_config()
return config, pixel_values
def _UpperCamelCase ( self ) -> Optional[Any]:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def _UpperCamelCase ( self , _A , _A ) -> int:
SCREAMING_SNAKE_CASE_ = FlaxRegNetModel(config=_A )
SCREAMING_SNAKE_CASE_ = model(_A )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _UpperCamelCase ( self , _A , _A ) -> Any:
SCREAMING_SNAKE_CASE_ = self.num_labels
SCREAMING_SNAKE_CASE_ = FlaxRegNetForImageClassification(config=_A )
SCREAMING_SNAKE_CASE_ = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self ) -> Any:
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = config_and_inputs
SCREAMING_SNAKE_CASE_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ =(FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
UpperCAmelCase_ =False
UpperCAmelCase_ =False
UpperCAmelCase_ =False
def _UpperCamelCase ( self ) -> None:
SCREAMING_SNAKE_CASE_ = FlaxRegNetModelTester(self )
SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_A , has_text_modality=_A )
def _UpperCamelCase ( self ) -> Union[str, Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCamelCase ( self ) -> str:
return
def _UpperCamelCase ( self ) -> List[str]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def _UpperCamelCase ( self ) -> int:
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def _UpperCamelCase ( self ) -> Dict:
pass
def _UpperCamelCase ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_A )
SCREAMING_SNAKE_CASE_ = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _A )
def _UpperCamelCase ( self ) -> Any:
def check_hidden_states_output(_A , _A , _A ):
SCREAMING_SNAKE_CASE_ = model_class(_A )
SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(_A , _A ) )
SCREAMING_SNAKE_CASE_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE_ = self.model_tester.num_stages
self.assertEqual(len(_A ) , expected_num_stages + 1 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = True
check_hidden_states_output(_A , _A , _A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_ = True
check_hidden_states_output(_A , _A , _A )
def _UpperCamelCase ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
SCREAMING_SNAKE_CASE_ = self._prepare_for_class(_A , _A )
SCREAMING_SNAKE_CASE_ = model_class(_A )
@jax.jit
def model_jitted(_A , **_A ):
return model(pixel_values=_A , **_A )
with self.subTest('''JIT Enabled''' ):
SCREAMING_SNAKE_CASE_ = model_jitted(**_A ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
SCREAMING_SNAKE_CASE_ = model_jitted(**_A ).to_tuple()
self.assertEqual(len(_A ) , len(_A ) )
for jitted_output, output in zip(_A , _A ):
self.assertEqual(jitted_output.shape , output.shape )
def A__ ( ):
SCREAMING_SNAKE_CASE_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_flax
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _UpperCamelCase ( self ) -> Optional[int]:
return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None
@slow
def _UpperCamelCase ( self ) -> int:
SCREAMING_SNAKE_CASE_ = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' )
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(images=_A , return_tensors='''np''' )
SCREAMING_SNAKE_CASE_ = model(**_A )
# verify the logits
SCREAMING_SNAKE_CASE_ = (1, 1000)
self.assertEqual(outputs.logits.shape , _A )
SCREAMING_SNAKE_CASE_ = jnp.array([-0.4180, -1.5051, -3.4836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , _A , atol=1E-4 ) )
| 299
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : List[Any] = {
'microsoft/trocr-base-handwritten': (
'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class __A (__SCREAMING_SNAKE_CASE):
'''simple docstring'''
__lowercase: Tuple = """trocr"""
__lowercase: Tuple = ["""past_key_values"""]
__lowercase: Optional[Any] = {
"""num_attention_heads""": """decoder_attention_heads""",
"""hidden_size""": """d_model""",
"""num_hidden_layers""": """decoder_layers""",
}
def __init__( self : Union[str, Any] , UpperCAmelCase_ : List[str]=50_265 , UpperCAmelCase_ : int=1_024 , UpperCAmelCase_ : Any=12 , UpperCAmelCase_ : Optional[int]=16 , UpperCAmelCase_ : Tuple=4_096 , UpperCAmelCase_ : Optional[int]="gelu" , UpperCAmelCase_ : List[Any]=512 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : Tuple=0.0 , UpperCAmelCase_ : Tuple=2 , UpperCAmelCase_ : Tuple=0.02 , UpperCAmelCase_ : int=0.0 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : int=1 , UpperCAmelCase_ : List[str]=0 , UpperCAmelCase_ : str=2 , **UpperCAmelCase_ : str , ) ->int:
"""simple docstring"""
snake_case_ = vocab_size
snake_case_ = d_model
snake_case_ = decoder_layers
snake_case_ = decoder_attention_heads
snake_case_ = decoder_ffn_dim
snake_case_ = activation_function
snake_case_ = max_position_embeddings
snake_case_ = dropout
snake_case_ = attention_dropout
snake_case_ = activation_dropout
snake_case_ = init_std
snake_case_ = decoder_layerdrop
snake_case_ = use_cache
snake_case_ = scale_embedding
snake_case_ = use_learned_position_embeddings
snake_case_ = layernorm_embedding
super().__init__(
pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , decoder_start_token_id=_A , **_A , )
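# A hedged usage sketch, assuming this class corresponds to `TrOCRConfig` in
# transformers (the registered `model_type` above is "trocr"):
#
#     from transformers import TrOCRConfig
#     config = TrOCRConfig(d_model=512, decoder_layers=6, decoder_attention_heads=8)
#     print(config.hidden_size)  # resolved to `d_model` through the attribute map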
| 347
|
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def A__ ( __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = int(number**0.5 )
return number == sq * sq
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
SCREAMING_SNAKE_CASE_ = x_den * y_den * z_den
SCREAMING_SNAKE_CASE_ = gcd(__lowerCamelCase, __lowerCamelCase )
top //= hcf
bottom //= hcf
return top, bottom
def A__ ( __lowerCamelCase = 35 ):
SCREAMING_SNAKE_CASE_ = set()
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = Fraction(0 )
SCREAMING_SNAKE_CASE_ = 42
for x_num in range(1, order + 1 ):
for x_den in range(x_num + 1, order + 1 ):
for y_num in range(1, order + 1 ):
for y_den in range(y_num + 1, order + 1 ):
# n=1
SCREAMING_SNAKE_CASE_ = x_num * y_den + x_den * y_num
SCREAMING_SNAKE_CASE_ = x_den * y_den
SCREAMING_SNAKE_CASE_ = gcd(__lowerCamelCase, __lowerCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
SCREAMING_SNAKE_CASE_ = add_three(
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
unique_s.add(__lowerCamelCase )
# n=2
SCREAMING_SNAKE_CASE_ = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
SCREAMING_SNAKE_CASE_ = x_den * x_den * y_den * y_den
if is_sq(__lowerCamelCase ) and is_sq(__lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = int(sqrt(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE_ = int(sqrt(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE_ = gcd(__lowerCamelCase, __lowerCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
SCREAMING_SNAKE_CASE_ = add_three(
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
unique_s.add(__lowerCamelCase )
# n=-1
SCREAMING_SNAKE_CASE_ = x_num * y_num
SCREAMING_SNAKE_CASE_ = x_den * y_num + x_num * y_den
SCREAMING_SNAKE_CASE_ = gcd(__lowerCamelCase, __lowerCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
SCREAMING_SNAKE_CASE_ = add_three(
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
unique_s.add(__lowerCamelCase )
                    # n=-2
SCREAMING_SNAKE_CASE_ = x_num * x_num * y_num * y_num
SCREAMING_SNAKE_CASE_ = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(__lowerCamelCase ) and is_sq(__lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = int(sqrt(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE_ = int(sqrt(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE_ = gcd(__lowerCamelCase, __lowerCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
SCREAMING_SNAKE_CASE_ = add_three(
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
unique_s.add(__lowerCamelCase )
for num, den in unique_s:
total += Fraction(__lowerCamelCase, __lowerCamelCase )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F"""{solution() = }""")
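# A sanity sketch (hypothetical import name): the three-fraction adder above
# should agree with exact `Fraction` arithmetic after gcd reduction.
#
#     top, bottom = add_three(1, 2, 1, 3, 1, 6)  # 1/2 + 1/3 + 1/6
#     assert (top, bottom) == (1, 1)
#     assert Fraction(top, bottom) == Fraction(1, 2) + Fraction(1, 3) + Fraction(1, 6)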
| 299
| 0
|
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
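# For reference, the replacement suggested by the warning above:
#
#     from diffusers import StableDiffusionImg2ImgPipeline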
| 231
|
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
@register_to_config
def __init__( self , _A , _A = None , _A = None ) -> Optional[Any]:
super().__init__()
SCREAMING_SNAKE_CASE_ = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
SCREAMING_SNAKE_CASE_ = torch.zeros(_A , _A )
else:
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = torch.nn.Parameter(_A )
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ =42
UpperCAmelCase_ =42
UpperCAmelCase_ =42
UpperCAmelCase_ =42
UpperCAmelCase_ =42
UpperCAmelCase_ =42
def __init__( self , _A , _A , _A , _A , _A , _A , ) -> Any:
super().__init__()
self.register_modules(
vqvae=_A , transformer=_A , text_encoder=_A , tokenizer=_A , scheduler=_A , learned_classifier_free_sampling_embeddings=_A , )
def _UpperCamelCase ( self , _A , _A , _A ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = len(_A ) if isinstance(_A , _A ) else 1
# get prompt text embeddings
SCREAMING_SNAKE_CASE_ = self.tokenizer(
_A , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE_ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
SCREAMING_SNAKE_CASE_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
SCREAMING_SNAKE_CASE_ = text_input_ids[:, : self.tokenizer.model_max_length]
SCREAMING_SNAKE_CASE_ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
SCREAMING_SNAKE_CASE_ = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=_A )
# duplicate text embeddings for each generation per prompt
SCREAMING_SNAKE_CASE_ = prompt_embeds.repeat_interleave(_A , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
SCREAMING_SNAKE_CASE_ = self.learned_classifier_free_sampling_embeddings.embeddings
SCREAMING_SNAKE_CASE_ = negative_prompt_embeds.unsqueeze(0 ).repeat(_A , 1 , 1 )
else:
SCREAMING_SNAKE_CASE_ = [''''''] * batch_size
SCREAMING_SNAKE_CASE_ = text_input_ids.shape[-1]
SCREAMING_SNAKE_CASE_ = self.tokenizer(
_A , padding='''max_length''' , max_length=_A , truncation=_A , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
SCREAMING_SNAKE_CASE_ = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=_A )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
SCREAMING_SNAKE_CASE_ = negative_prompt_embeds.shape[1]
SCREAMING_SNAKE_CASE_ = negative_prompt_embeds.repeat(1 , _A , 1 )
SCREAMING_SNAKE_CASE_ = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _A , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
SCREAMING_SNAKE_CASE_ = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self , _A , _A = 100 , _A = 5.0 , _A = 1.0 , _A = 1 , _A = None , _A = None , _A = "pil" , _A = True , _A = None , _A = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
if isinstance(_A , _A ):
SCREAMING_SNAKE_CASE_ = 1
elif isinstance(_A , _A ):
SCREAMING_SNAKE_CASE_ = len(_A )
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(_A )}''' )
SCREAMING_SNAKE_CASE_ = batch_size * num_images_per_prompt
SCREAMING_SNAKE_CASE_ = guidance_scale > 1.0
SCREAMING_SNAKE_CASE_ = self._encode_prompt(_A , _A , _A )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_A , _A ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(_A )}.''' )
# get the initial completely masked latents unless the user supplied it
SCREAMING_SNAKE_CASE_ = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
SCREAMING_SNAKE_CASE_ = self.transformer.num_vector_embeds - 1
SCREAMING_SNAKE_CASE_ = torch.full(_A , _A ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
                    '''Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0,'''
F''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
SCREAMING_SNAKE_CASE_ = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(_A , device=self.device )
SCREAMING_SNAKE_CASE_ = self.scheduler.timesteps.to(self.device )
SCREAMING_SNAKE_CASE_ = latents
for i, t in enumerate(self.progress_bar(_A ) ):
# expand the sample if we are doing classifier free guidance
SCREAMING_SNAKE_CASE_ = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
SCREAMING_SNAKE_CASE_ = self.transformer(_A , encoder_hidden_states=_A , timestep=_A ).sample
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = model_output.chunk(2 )
SCREAMING_SNAKE_CASE_ = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(_A , dim=1 , keepdim=_A )
SCREAMING_SNAKE_CASE_ = self.truncate(_A , _A )
# remove `log(0)`'s (`-inf`s)
SCREAMING_SNAKE_CASE_ = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE_ = self.scheduler.step(_A , timestep=_A , sample=_A , generator=_A ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_A , _A , _A )
SCREAMING_SNAKE_CASE_ = self.vqvae.config.vq_embed_dim
SCREAMING_SNAKE_CASE_ = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
SCREAMING_SNAKE_CASE_ = self.vqvae.quantize.get_codebook_entry(_A , shape=_A )
SCREAMING_SNAKE_CASE_ = self.vqvae.decode(_A , force_not_quantize=_A ).sample
SCREAMING_SNAKE_CASE_ = (image / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE_ = self.numpy_to_pil(_A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_A )
def _UpperCamelCase ( self , _A , _A ) -> torch.FloatTensor:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = torch.sort(_A , 1 , descending=_A )
SCREAMING_SNAKE_CASE_ = torch.exp(_A )
SCREAMING_SNAKE_CASE_ = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
SCREAMING_SNAKE_CASE_ = torch.full_like(keep_mask[:, 0:1, :] , _A )
SCREAMING_SNAKE_CASE_ = torch.cat((all_true, keep_mask) , dim=1 )
SCREAMING_SNAKE_CASE_ = keep_mask[:, :-1, :]
SCREAMING_SNAKE_CASE_ = keep_mask.gather(1 , indices.argsort(1 ) )
SCREAMING_SNAKE_CASE_ = log_p_x_0.clone()
SCREAMING_SNAKE_CASE_ = -torch.inf # -inf = log(0)
return rv
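# A toy sketch (not part of the pipeline) of the truncation rule implemented in
# the method above: keep the most probable classes whose cumulative probability
# stays below `truncation_rate`, always retaining at least the argmax.
#
#     log_p = torch.log(torch.tensor([[[0.6], [0.3], [0.1]]]))  # (batch, classes, pixels)
#     sorted_lp, idx = torch.sort(log_p, 1, descending=True)
#     keep = torch.exp(sorted_lp).cumsum(dim=1) < 0.5           # truncation_rate = 0.5
#     keep = torch.cat((torch.ones_like(keep[:, :1, :]), keep[:, :-1, :]), dim=1)
#     # only the 0.6 class survives; masked entries would be set to -inf (= log 0)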
| 299
| 0
|
import string
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any]) -> str:
'''simple docstring'''
__UpperCamelCase : Dict = ""
for i in sequence:
__UpperCamelCase : Any = ord(__lowerCamelCase)
if 65 <= extract <= 90:
output += chr(155 - extract)
elif 97 <= extract <= 122:
output += chr(219 - extract)
else:
output += i
return output
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : str) -> Optional[Any]:
'''simple docstring'''
__UpperCamelCase : List[str] = string.ascii_letters
__UpperCamelCase : Optional[Any] = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(__lowerCamelCase)] if c in letters else c for c in sequence)
def _SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
'''simple docstring'''
from timeit import timeit
print("Running performance benchmarks...")
__UpperCamelCase : List[str] = "from string import printable ; from __main__ import atbash, atbash_slow"
print(F'> atbash_slow(): {timeit("atbash_slow(printable)" , setup=__lowerCamelCase)} seconds')
print(F'> atbash(): {timeit("atbash(printable)" , setup=__lowerCamelCase)} seconds')
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f"{example} encrypted in atbash: {atbash(example)}")
benchmark()
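# An alternative sketch using `str.maketrans` -- an equivalent mapping under the
# assumption of ASCII letters only, not the original implementation:
#
#     _TABLE = str.maketrans(
#         string.ascii_letters,
#         string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1],
#     )
#     def atbash_translate(sequence: str) -> str:
#         return sequence.translate(_TABLE)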
| 232
|
def A__ ( __lowerCamelCase ):
if not isinstance(__lowerCamelCase, __lowerCamelCase ):
raise ValueError('''Input must be an integer''' )
if input_num <= 0:
raise ValueError('''Input must be positive''' )
return sum(
divisor for divisor in range(1, input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
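# A faster O(sqrt n) sketch for the same proper-divisor sum, offered as an
# alternative (hypothetical name), not the original implementation:
#
#     def sum_of_proper_divisors(num: int) -> int:
#         total, i = (1 if num > 1 else 0), 2
#         while i * i <= num:
#             if num % i == 0:
#                 total += i + (num // i if i != num // i else 0)
#             i += 1
#         return total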
| 299
| 0
|
from __future__ import annotations
lowerCamelCase : List[Any] = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,) -> Union[str, Any]:
snake_case : Union[str, Any] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__lowerCamelCase ) )
] # the reference grid
snake_case : List[Any] = 1
snake_case : Optional[int] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__lowerCamelCase ) )
] # the action grid
snake_case : Dict = init[0]
snake_case : int = init[1]
snake_case : Union[str, Any] = 0
    snake_case : Union[str, Any] = g + heuristic[x][y]  # f = g + h: cost so far plus heuristic estimate to the goal
snake_case : Dict = [[f, g, x, y]]
snake_case : Optional[int] = False # flag that is set when search is complete
snake_case : Optional[Any] = False # flag set if we can't find expand
while not found and not resign:
if len(__lowerCamelCase ) == 0:
raise ValueError("""Algorithm is unable to find solution""" )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
snake_case : int = cell.pop()
snake_case : Tuple = next_cell[2]
snake_case : Any = next_cell[3]
snake_case : Tuple = next_cell[1]
if x == goal[0] and y == goal[1]:
snake_case : List[Any] = True
else:
for i in range(len(__lowerCamelCase ) ): # to try out different valid actions
snake_case : Union[str, Any] = x + DIRECTIONS[i][0]
snake_case : Union[str, Any] = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(__lowerCamelCase ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
snake_case : List[str] = g + cost
snake_case : Optional[Any] = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
snake_case : Dict = 1
snake_case : Dict = i
snake_case : Union[str, Any] = []
snake_case : Tuple = goal[0]
snake_case : List[str] = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
snake_case : Tuple = x - DIRECTIONS[action[x][y]][0]
snake_case : Any = y - DIRECTIONS[action[x][y]][1]
snake_case : Tuple = xa
snake_case : int = ya
invpath.append([x, y] )
snake_case : List[Any] = []
for i in range(len(__lowerCamelCase ) ):
path.append(invpath[len(__lowerCamelCase ) - 1 - i] )
return path, action
if __name__ == "__main__":
lowerCamelCase : Dict = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
lowerCamelCase : List[Any] = [0, 0]
    # all coordinates are given in format [row, col]
lowerCamelCase : Union[str, Any] = [len(grid) - 1, len(grid[0]) - 1]
lowerCamelCase : Any = 1
# the cost map which pushes the path closer to the goal
lowerCamelCase : List[str] = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
lowerCamelCase : List[str] = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
lowerCamelCase : Dict = 9_9
lowerCamelCase , lowerCamelCase : Optional[Any] = search(grid, init, goal, cost, heuristic)
print('ACTION MAP')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
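# A hedged sketch factoring out the Manhattan-distance heuristic built inline
# above (hypothetical helper name). Note that the extra obstacle penalty of 99
# makes the heuristic inadmissible, trading guaranteed optimality for speed.
#
#     def manhattan_heuristic(grid, goal):
#         return [
#             [abs(row - goal[0]) + abs(col - goal[1]) for col in range(len(grid[0]))]
#             for row in range(len(grid))
#         ]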
| 124
|
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__UpperCAmelCase = "pt"
elif is_tf_available():
__UpperCAmelCase = "tf"
else:
__UpperCAmelCase = "jax"
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ =ByTaTokenizer
UpperCAmelCase_ =False
def _UpperCamelCase ( self ) -> Tuple:
super().setUp()
SCREAMING_SNAKE_CASE_ = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _UpperCamelCase ( self ) -> List[str]:
return ByTaTokenizer.from_pretrained('''google/byt5-small''' )
def _UpperCamelCase ( self , **_A ) -> ByTaTokenizer:
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_A )
def _UpperCamelCase ( self , _A , _A=False , _A=20 , _A=5 ) -> Tuple[str, list]:
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for ByT5 because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
SCREAMING_SNAKE_CASE_ = []
for i in range(len(_A ) ):
try:
SCREAMING_SNAKE_CASE_ = tokenizer.decode([i] , clean_up_tokenization_spaces=_A )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
SCREAMING_SNAKE_CASE_ = list(filter(lambda _A : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , _A ) )
SCREAMING_SNAKE_CASE_ = list(filter(lambda _A : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_A ) , _A ) )
if max_length is not None and len(_A ) > max_length:
SCREAMING_SNAKE_CASE_ = toks[:max_length]
if min_length is not None and len(_A ) < min_length and len(_A ) > 0:
while len(_A ) < min_length:
SCREAMING_SNAKE_CASE_ = toks + toks
# toks_str = [t[1] for t in toks]
SCREAMING_SNAKE_CASE_ = [t[0] for t in toks]
# Ensure consistency
SCREAMING_SNAKE_CASE_ = tokenizer.decode(_A , clean_up_tokenization_spaces=_A )
if " " not in output_txt and len(_A ) > 1:
SCREAMING_SNAKE_CASE_ = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_A )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_A )
)
if with_prefix_space:
SCREAMING_SNAKE_CASE_ = ''' ''' + output_txt
SCREAMING_SNAKE_CASE_ = tokenizer.encode(_A , add_special_tokens=_A )
return output_txt, output_ids
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = tokenizer(['''hi</s>''', '''I went to the gym</s>''', '''</s>'''] )
SCREAMING_SNAKE_CASE_ = tokenizer(['''hi''', '''I went to the gym''', ''''''] )
self.assertListEqual(batch_with_eos_added['''input_ids'''] , batch_without_eos_added['''input_ids'''] )
def _UpperCamelCase ( self ) -> Any:
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = '''Unicode €.'''
SCREAMING_SNAKE_CASE_ = tokenizer(_A )
SCREAMING_SNAKE_CASE_ = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['''input_ids'''] , _A )
# decoding
SCREAMING_SNAKE_CASE_ = tokenizer.decode(_A )
self.assertEqual(_A , '''Unicode €.</s>''' )
SCREAMING_SNAKE_CASE_ = tokenizer('''e è é ê ë''' )
SCREAMING_SNAKE_CASE_ = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['''input_ids'''] , _A )
# decoding
SCREAMING_SNAKE_CASE_ = tokenizer.decode(_A )
self.assertEqual(_A , '''e è é ê ë</s>''' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''e è é ê ë</s>''' )
def _UpperCamelCase ( self ) -> List[str]:
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
SCREAMING_SNAKE_CASE_ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
SCREAMING_SNAKE_CASE_ = tokenizer(_A , padding=_A , return_tensors=_A )
self.assertIsInstance(_A , _A )
if FRAMEWORK != "jax":
SCREAMING_SNAKE_CASE_ = list(batch.input_ids.numpy()[0] )
else:
SCREAMING_SNAKE_CASE_ = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_A , _A )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
SCREAMING_SNAKE_CASE_ = tokenizer(_A , padding=_A , return_tensors=_A )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , _A )
self.assertIn('''attention_mask''' , _A )
self.assertNotIn('''decoder_input_ids''' , _A )
self.assertNotIn('''decoder_attention_mask''' , _A )
def _UpperCamelCase ( self ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = [
'''Summary of the text.''',
'''Another summary.''',
]
SCREAMING_SNAKE_CASE_ = tokenizer(
text_target=_A , max_length=32 , padding='''max_length''' , truncation=_A , return_tensors=_A )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
def _UpperCamelCase ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = ['''A long paragraph for summarization. </s>''']
SCREAMING_SNAKE_CASE_ = ['''Summary of the text. </s>''']
# fmt: off
SCREAMING_SNAKE_CASE_ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
SCREAMING_SNAKE_CASE_ = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
SCREAMING_SNAKE_CASE_ = tokenizer(_A , text_target=_A )
self.assertEqual(_A , batch['''input_ids'''][0] )
self.assertEqual(_A , batch['''labels'''][0] )
def _UpperCamelCase ( self ) -> Dict:
# safety check on max_len default value so we are sure the test works
SCREAMING_SNAKE_CASE_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
SCREAMING_SNAKE_CASE_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = ''' He is very happy, UNwant\u00E9d,running'''
SCREAMING_SNAKE_CASE_ = tokenizer.encode(_A , add_special_tokens=_A )
tokenizer.save_pretrained(_A )
SCREAMING_SNAKE_CASE_ = tokenizer.__class__.from_pretrained(_A )
SCREAMING_SNAKE_CASE_ = after_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
shutil.rmtree(_A )
SCREAMING_SNAKE_CASE_ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''] )
SCREAMING_SNAKE_CASE_ = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''' )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
SCREAMING_SNAKE_CASE_ = tokenizer.encode(_A , add_special_tokens=_A )
tokenizer.save_pretrained(_A )
SCREAMING_SNAKE_CASE_ = tokenizer.__class__.from_pretrained(_A )
SCREAMING_SNAKE_CASE_ = after_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
SCREAMING_SNAKE_CASE_ = tokenizer.__class__.from_pretrained(_A , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_A )
def _UpperCamelCase ( self ) -> int:
SCREAMING_SNAKE_CASE_ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_A )
with open(os.path.join(_A , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
SCREAMING_SNAKE_CASE_ = json.load(_A )
with open(os.path.join(_A , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
SCREAMING_SNAKE_CASE_ = json.load(_A )
SCREAMING_SNAKE_CASE_ = [F'''<extra_id_{i}>''' for i in range(125 )]
SCREAMING_SNAKE_CASE_ = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
SCREAMING_SNAKE_CASE_ = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(_A , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(_A , _A )
with open(os.path.join(_A , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(_A , _A )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
SCREAMING_SNAKE_CASE_ = tokenizer_class.from_pretrained(
_A , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
SCREAMING_SNAKE_CASE_ = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=_A )]
SCREAMING_SNAKE_CASE_ = tokenizer_class.from_pretrained(
_A , additional_special_tokens=_A , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE_ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_A )
SCREAMING_SNAKE_CASE_ = tokenizer_class.from_pretrained(_A )
self.assertTrue(tokenizer.decode([255] ) == '''''' )
def _UpperCamelCase ( self ) -> int:
pass
def _UpperCamelCase ( self ) -> Any:
pass
def _UpperCamelCase ( self ) -> Any:
pass
def _UpperCamelCase ( self ) -> Optional[int]:
pass
def _UpperCamelCase ( self ) -> Union[str, Any]:
        # The default common tokenizer tests use invalid tokens for ByT5, which can only accept
        # one-character strings and special added tokens as tokens
SCREAMING_SNAKE_CASE_ = self.get_tokenizers(fast=_A , do_lower_case=_A )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
SCREAMING_SNAKE_CASE_ = ['''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''x''', '''t''', '''</s>''']
SCREAMING_SNAKE_CASE_ = tokenizer.convert_tokens_to_string(_A )
self.assertIsInstance(_A , _A )
def _UpperCamelCase ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
SCREAMING_SNAKE_CASE_ = [
'''bos_token''',
'''eos_token''',
'''unk_token''',
'''sep_token''',
'''pad_token''',
'''cls_token''',
'''mask_token''',
]
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = tokenizer.convert_ids_to_tokens(
_A , skip_special_tokens=_A )
for attr in attributes_list:
setattr(_A , attr + '''_id''' , _A )
self.assertEqual(getattr(_A , _A ) , _A )
self.assertEqual(getattr(_A , attr + '''_id''' ) , _A )
setattr(_A , attr + '''_id''' , _A )
self.assertEqual(getattr(_A , _A ) , _A )
self.assertEqual(getattr(_A , attr + '''_id''' ) , _A )
setattr(_A , '''additional_special_tokens_ids''' , [] )
self.assertListEqual(getattr(_A , '''additional_special_tokens''' ) , [] )
self.assertListEqual(getattr(_A , '''additional_special_tokens_ids''' ) , [] )
setattr(_A , '''additional_special_tokens_ids''' , [token_id_to_test_setters] )
self.assertListEqual(getattr(_A , '''additional_special_tokens''' ) , [token_to_test_setters] )
self.assertListEqual(getattr(_A , '''additional_special_tokens_ids''' ) , [token_id_to_test_setters] )
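# A compact sketch of the save/reload round-trip these tests exercise
# (requires network access to the Hub; the model id is illustrative, not part of the tests):
# from transformers import AutoTokenizer
# tok = AutoTokenizer.from_pretrained('''google/byt5-small''')
# tok.add_tokens(['''bim''', '''bambam'''])
# tok.save_pretrained('''/tmp/byt5''')
# reloaded = tok.__class__.from_pretrained('''/tmp/byt5''')
# assert tok.encode('''bim bambam''') == reloaded.encode('''bim bambam''')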
| 299
| 0
|
'''simple docstring'''
def _A ( A__ ):
"""simple docstring"""
    if not isinstance(A__ , int ):
        raise TypeError('''Input value must be a \'int\' type''' )
    elif A__ < 0:
        raise ValueError('''Input value must be a positive integer''' )
    return bin(A__ ).count('''1''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
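# Hand-checkable examples for the popcount above (added for illustration):
# _A(25) -> 3    # bin(25) == '0b11001'
# _A(36) -> 2    # bin(36) == '0b100100'
# _A(16) -> 1    # bin(16) == '0b10000'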
| 104
|
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative ( img ):
    # getting number of pixels in the image
    pixel_h , pixel_v = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(pixel_h ):
        for j in range(pixel_v ):
            img[i][j] = [2_55, 2_55, 2_55] - img[i][j]
return img
if __name__ == "__main__":
# read original image
    img = imread("image_data/lena.jpg", 1)
    # convert to its negative
    img = convert_to_negative(img)
# show result image
imshow("negative of original image", img)
waitKey(0)
destroyAllWindows()
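# Quick self-check without reading an image from disk (array values are illustrative):
# import numpy as np
# convert_to_negative(np.array([[[0, 10, 255], [5, 5, 5]]], dtype=np.uint8))
# -> array([[[255, 245, 0], [250, 250, 250]]], dtype=uint8)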
| 299
| 0
|
'''simple docstring'''
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main( args ):
    """simple docstring"""
    pruning_method =args.pruning_method
    threshold =args.threshold
    model_name_or_path =args.model_name_or_path.rstrip('/' )
    target_model_path =args.target_model_path
    print(f"""Load fine-pruned model from {model_name_or_path}""" )
    model =torch.load(os.path.join(model_name_or_path , 'pytorch_model.bin' ) )
    pruned_model ={}
    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] =tensor
            print(f"""Copied layer {name}""" )
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] =tensor
            print(f"""Copied layer {name}""" )
        elif "bias" in name:
            pruned_model[name] =tensor
            print(f"""Copied layer {name}""" )
        else:
            if pruning_method == "magnitude":
                mask =MagnitudeBinarizer.apply(inputs=tensor , threshold=threshold )
                pruned_model[name] =tensor * mask
                print(f"""Pruned layer {name}""" )
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ =name[:-6]
                scores =model[f"""{prefix_}mask_scores"""]
                mask =TopKBinarizer.apply(scores , threshold )
                pruned_model[name] =tensor * mask
                print(f"""Pruned layer {name}""" )
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ =name[:-6]
                scores =model[f"""{prefix_}mask_scores"""]
                mask =ThresholdBinarizer.apply(scores , threshold , True )
                pruned_model[name] =tensor * mask
                print(f"""Pruned layer {name}""" )
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ =name[:-6]
                scores =model[f"""{prefix_}mask_scores"""]
                l , r =-0.1, 1.1
                s =torch.sigmoid(scores )
                s_bar =s * (r - l) + l
                mask =s_bar.clamp(min=0.0 , max=1.0 )
                pruned_model[name] =tensor * mask
                print(f"""Pruned layer {name}""" )
            else:
                raise ValueError('Unknown pruning method' )
    if target_model_path is None:
        target_model_path =os.path.join(
            os.path.dirname(model_name_or_path ) , f"""bertarized_{os.path.basename(model_name_or_path )}""" )
    if not os.path.isdir(target_model_path ):
        shutil.copytree(model_name_or_path , target_model_path )
        print(f"""\nCreated folder {target_model_path}""" )
    torch.save(pruned_model , os.path.join(target_model_path , 'pytorch_model.bin' ) )
    print('\nPruned model saved! See you later!' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
help="""Folder containing the model that was previously fine-pruned""",
)
    args = parser.parse_args()
main(args)
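# Example invocation (paths are illustrative; the flags come from the parser above):
# python bertarize.py \
#     --pruning_method topK \
#     --threshold 0.10 \
#     --model_name_or_path ./serialization_dir/fine_pruned_model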
| 166
|
import math
def check_partition_perfect ( positive_integer ):
    exponent = math.log2(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
    return exponent == int(exponent )
def solution ( max_proportion = 1 / 1_23_45 ):
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate ):
            partition_candidate = int(partition_candidate )
            total_partitions += 1
            if check_partition_perfect(partition_candidate ):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate )
integer += 1
if __name__ == "__main__":
print(F"""{solution() = }""")
| 299
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCamelCase_ = logging.get_logger(__name__)
class a_ (BaseImageProcessor ):
__lowerCAmelCase : Any = ["""pixel_values"""]
def __init__( self , snake_case_ = True , snake_case_ = None , snake_case_ = PILImageResampling.BICUBIC , snake_case_ = True , snake_case_ = True , snake_case_ = 1 / 2_5_5 , snake_case_ = None , snake_case_ = True , snake_case_ = None , snake_case_ = None , **snake_case_ , ):
super().__init__(**_A )
_lowerCAmelCase : int = size if size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
_lowerCAmelCase : Any = get_size_dict(_A )
_lowerCAmelCase : List[Any] = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
_lowerCAmelCase : Dict = get_size_dict(_A , default_to_square=_A , param_name="""crop_size""" )
_lowerCAmelCase : Union[str, Any] = do_resize
_lowerCAmelCase : Tuple = do_rescale
_lowerCAmelCase : str = do_normalize
_lowerCAmelCase : List[Any] = do_center_crop
_lowerCAmelCase : List[Any] = crop_size
_lowerCAmelCase : int = size
_lowerCAmelCase : Optional[int] = resample
_lowerCAmelCase : Tuple = rescale_factor
_lowerCAmelCase : List[Any] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_lowerCAmelCase : List[Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ = PILImageResampling.BILINEAR , snake_case_ = None , **snake_case_ , ):
_lowerCAmelCase : int = get_size_dict(_A )
if "shortest_edge" in size:
_lowerCAmelCase : List[str] = get_resize_output_image_size(_A , size=size["""shortest_edge"""] , default_to_square=_A )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
_lowerCAmelCase : int = (size["""height"""], size["""width"""])
else:
raise ValueError(f'Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}' )
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ = None , **snake_case_ , ):
_lowerCAmelCase : Union[str, Any] = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(_A , size=(size["""height"""], size["""width"""]) , data_format=_A , **_A )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ = None , **snake_case_ ):
return rescale(_A , scale=_A , data_format=_A , **_A )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = None , **snake_case_ , ):
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = ChannelDimension.FIRST , **snake_case_ , ):
_lowerCAmelCase : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase : Any = do_rescale if do_rescale is not None else self.do_rescale
_lowerCAmelCase : str = do_normalize if do_normalize is not None else self.do_normalize
_lowerCAmelCase : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCAmelCase : List[Any] = crop_size if crop_size is not None else self.crop_size
_lowerCAmelCase : Optional[Any] = get_size_dict(_A , param_name="""crop_size""" , default_to_square=_A )
_lowerCAmelCase : Dict = resample if resample is not None else self.resample
_lowerCAmelCase : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCAmelCase : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
_lowerCAmelCase : Union[str, Any] = image_std if image_std is not None else self.image_std
_lowerCAmelCase : Dict = size if size is not None else self.size
_lowerCAmelCase : Dict = get_size_dict(_A )
if not is_batched(_A ):
_lowerCAmelCase : Tuple = [images]
if not valid_images(_A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
# All transformations expect numpy arrays.
_lowerCAmelCase : Dict = [to_numpy_array(_A ) for image in images]
if do_resize:
_lowerCAmelCase : Tuple = [self.resize(image=_A , size=_A , resample=_A ) for image in images]
if do_center_crop:
_lowerCAmelCase : Optional[Any] = [self.center_crop(image=_A , size=_A ) for image in images]
if do_rescale:
_lowerCAmelCase : List[str] = [self.rescale(image=_A , scale=_A ) for image in images]
if do_normalize:
_lowerCAmelCase : int = [self.normalize(image=_A , mean=_A , std=_A ) for image in images]
_lowerCAmelCase : Any = [to_channel_dimension_format(_A , _A ) for image in images]
_lowerCAmelCase : Tuple = {"""pixel_values""": images}
return BatchFeature(data=_A , tensor_type=_A )
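# Minimal usage sketch (illustrative only; assumes a PIL image `pil_img` and that the
# final method above plays the role of the standard `preprocess`, as in other
# `BaseImageProcessor` subclasses):
# processor = a_()
# batch = processor.preprocess(pil_img, return_tensors="""np""")
# batch["""pixel_values"""].shape  # -> (1, 3, 224, 224) with the 224x224 defaults above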
| 309
|
def infix_2_postfix ( infix ):
    stack = []
    post_fix = []
    priority = {
        '''^''': 3,
        '''*''': 2,
        '''/''': 2,
        '''%''': 2,
        '''+''': 1,
        '''-''': 1,
    } # Priority of each operator
    print_width = len(infix ) if (len(infix ) > 7) else 7
    # Print table header for output
    print(
        '''Symbol'''.center(8 ), '''Stack'''.center(print_width ), '''Postfix'''.center(print_width ), sep=''' | ''', )
    print('''-''' * (print_width * 3 + 7) )
    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x ) # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x ) # if x is "(" push to Stack
        elif x == ")": # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack ) == 0:
                stack.append(x ) # If stack is empty, push x to stack
            else: # while priority of x is not > priority of element in the stack
                while len(stack ) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop() ) # pop stack & add to Postfix
                stack.append(x ) # push x to stack
        print(
            x.center(8 ), (''''''.join(stack )).ljust(print_width ), (''''''.join(post_fix )).ljust(print_width ), sep=''' | ''', ) # Output in tabular format
    while len(stack ) > 0: # while stack is not empty
        post_fix.append(stack.pop() ) # pop stack & add to Postfix
        print(
            ''' '''.center(8 ), (''''''.join(stack )).ljust(print_width ), (''''''.join(post_fix )).ljust(print_width ), sep=''' | ''', ) # Output in tabular format
    return "".join(post_fix ) # return Postfix as str
def infix_2_prefix ( infix ):
    infix = list(infix[::-1] ) # reverse the infix equation
    for i in range(len(infix ) ):
        if infix[i] == "(":
            infix[i] = ''')''' # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = '''(''' # change ")" to "("
    return (infix_2_postfix(''''''.join(infix ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 299
| 0
|
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
'''sample_size''': 3_2,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 1_0_0_0,
'''block_out_channels''': [3_2, 6_4],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
IMAGENET_64_UNET_CONFIG = {
'''sample_size''': 6_4,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 1_0_0_0,
'''block_out_channels''': [1_9_2, 1_9_2 * 2, 1_9_2 * 3, 1_9_2 * 4],
'''attention_head_dim''': 6_4,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
LSUN_256_UNET_CONFIG = {
'''sample_size''': 2_5_6,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [2_5_6, 2_5_6, 2_5_6 * 2, 2_5_6 * 2, 2_5_6 * 4, 2_5_6 * 4],
'''attention_head_dim''': 6_4,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
CD_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 4_0,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 2_0_1,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 1_5_1,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
def strabool( v :str ) -> bool:
    """simple docstring"""
    if isinstance(v , bool ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("""boolean value expected""" )
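# Behavior sketch for the flag parser above:
# strabool("yes") -> True; strabool("0") -> False; strabool(True) -> True
# any other string raises argparse.ArgumentTypeError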
def convert_resnet( checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=False ):
    """simple docstring"""
    new_checkpoint[F'{new_prefix}.norm1.weight'] = checkpoint[F'{old_prefix}.in_layers.0.weight']
    new_checkpoint[F'{new_prefix}.norm1.bias'] = checkpoint[F'{old_prefix}.in_layers.0.bias']
    new_checkpoint[F'{new_prefix}.conv1.weight'] = checkpoint[F'{old_prefix}.in_layers.2.weight']
    new_checkpoint[F'{new_prefix}.conv1.bias'] = checkpoint[F'{old_prefix}.in_layers.2.bias']
    new_checkpoint[F'{new_prefix}.time_emb_proj.weight'] = checkpoint[F'{old_prefix}.emb_layers.1.weight']
    new_checkpoint[F'{new_prefix}.time_emb_proj.bias'] = checkpoint[F'{old_prefix}.emb_layers.1.bias']
    new_checkpoint[F'{new_prefix}.norm2.weight'] = checkpoint[F'{old_prefix}.out_layers.0.weight']
    new_checkpoint[F'{new_prefix}.norm2.bias'] = checkpoint[F'{old_prefix}.out_layers.0.bias']
    new_checkpoint[F'{new_prefix}.conv2.weight'] = checkpoint[F'{old_prefix}.out_layers.3.weight']
    new_checkpoint[F'{new_prefix}.conv2.bias'] = checkpoint[F'{old_prefix}.out_layers.3.bias']
    if has_skip:
        new_checkpoint[F'{new_prefix}.conv_shortcut.weight'] = checkpoint[F'{old_prefix}.skip_connection.weight']
        new_checkpoint[F'{new_prefix}.conv_shortcut.bias'] = checkpoint[F'{old_prefix}.skip_connection.bias']
return new_checkpoint
def convert_attention( checkpoint , new_checkpoint , old_prefix , new_prefix , attention_dim=None ):
    """simple docstring"""
    weight_q , weight_k , weight_v = checkpoint[F'{old_prefix}.qkv.weight'].chunk(3 , dim=0 )
    bias_q , bias_k , bias_v = checkpoint[F'{old_prefix}.qkv.bias'].chunk(3 , dim=0 )
    new_checkpoint[F'{new_prefix}.group_norm.weight'] = checkpoint[F'{old_prefix}.norm.weight']
    new_checkpoint[F'{new_prefix}.group_norm.bias'] = checkpoint[F'{old_prefix}.norm.bias']
    new_checkpoint[F'{new_prefix}.to_q.weight'] = weight_q.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'{new_prefix}.to_q.bias'] = bias_q.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'{new_prefix}.to_k.weight'] = weight_k.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'{new_prefix}.to_k.bias'] = bias_k.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'{new_prefix}.to_v.weight'] = weight_v.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'{new_prefix}.to_v.bias'] = bias_v.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'{new_prefix}.to_out.0.weight'] = (
        checkpoint[F'{old_prefix}.proj_out.weight'].squeeze(-1 ).squeeze(-1 )
    )
    new_checkpoint[F'{new_prefix}.to_out.0.bias'] = checkpoint[F'{old_prefix}.proj_out.bias'].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def con_pt_to_diffuser( unet_path , unet_config ):
    """simple docstring"""
    checkpoint = torch.load(unet_path , map_location="""cpu""" )
    new_checkpoint = {}
    new_checkpoint["""time_embedding.linear_1.weight"""] = checkpoint["""time_embed.0.weight"""]
    new_checkpoint["""time_embedding.linear_1.bias"""] = checkpoint["""time_embed.0.bias"""]
    new_checkpoint["""time_embedding.linear_2.weight"""] = checkpoint["""time_embed.2.weight"""]
    new_checkpoint["""time_embedding.linear_2.bias"""] = checkpoint["""time_embed.2.bias"""]
    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["""class_embedding.weight"""] = checkpoint["""label_emb.weight"""]
    new_checkpoint["""conv_in.weight"""] = checkpoint["""input_blocks.0.0.weight"""]
    new_checkpoint["""conv_in.bias"""] = checkpoint["""input_blocks.0.0.bias"""]
    down_block_types = unet_config["""down_block_types"""]
    layers_per_block = unet_config["""layers_per_block"""]
    attention_head_dim = unet_config["""attention_head_dim"""]
    channels_list = unet_config["""block_out_channels"""]
    current_layer = 1
    prev_channels = channels_list[0]
    for i, layer_type in enumerate(down_block_types ):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block ):
                new_prefix = F'down_blocks.{i}.resnets.{j}'
                old_prefix = F'input_blocks.{current_layer}.0'
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=has_skip )
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block ):
                new_prefix = F'down_blocks.{i}.resnets.{j}'
                old_prefix = F'input_blocks.{current_layer}.0'
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=has_skip )
                new_prefix = F'down_blocks.{i}.attentions.{j}'
                old_prefix = F'input_blocks.{current_layer}.1'
                new_checkpoint = convert_attention(
                    checkpoint , new_checkpoint , old_prefix , new_prefix , attention_head_dim )
                current_layer += 1
        if i != len(down_block_types ) - 1:
            new_prefix = F'down_blocks.{i}.downsamplers.0'
            old_prefix = F'input_blocks.{current_layer}.0'
            new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
            current_layer += 1
        prev_channels = current_channels
    # hardcoded the mid-block for now
    new_prefix = """mid_block.resnets.0"""
    old_prefix = """middle_block.0"""
    new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
    new_prefix = """mid_block.attentions.0"""
    old_prefix = """middle_block.1"""
    new_checkpoint = convert_attention(checkpoint , new_checkpoint , old_prefix , new_prefix , attention_head_dim )
    new_prefix = """mid_block.resnets.1"""
    old_prefix = """middle_block.2"""
    new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
    current_layer = 0
    up_block_types = unet_config["""up_block_types"""]
    for i, layer_type in enumerate(up_block_types ):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1 ):
                new_prefix = F'up_blocks.{i}.resnets.{j}'
                old_prefix = F'output_blocks.{current_layer}.0'
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=True )
                current_layer += 1
            if i != len(up_block_types ) - 1:
                new_prefix = F'up_blocks.{i}.upsamplers.0'
                old_prefix = F'output_blocks.{current_layer-1}.1'
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1 ):
                new_prefix = F'up_blocks.{i}.resnets.{j}'
                old_prefix = F'output_blocks.{current_layer}.0'
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=True )
                new_prefix = F'up_blocks.{i}.attentions.{j}'
                old_prefix = F'output_blocks.{current_layer}.1'
                new_checkpoint = convert_attention(
                    checkpoint , new_checkpoint , old_prefix , new_prefix , attention_head_dim )
                current_layer += 1
            if i != len(up_block_types ) - 1:
                new_prefix = F'up_blocks.{i}.upsamplers.0'
                old_prefix = F'output_blocks.{current_layer-1}.2'
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
    new_checkpoint["""conv_norm_out.weight"""] = checkpoint["""out.0.weight"""]
    new_checkpoint["""conv_norm_out.bias"""] = checkpoint["""out.0.bias"""]
    new_checkpoint["""conv_out.weight"""] = checkpoint["""out.2.weight"""]
    new_checkpoint["""conv_out.bias"""] = checkpoint["""out.2.bias"""]
return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
)
parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
    args = parser.parse_args()
    args.class_cond = strabool(args.class_cond)
    ckpt_name = os.path.basename(args.unet_path)
print(F'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
A : Optional[int] = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
A : Optional[Any] = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
A : Any = TEST_UNET_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
A : Optional[int] = None
A : int = con_pt_to_diffuser(args.unet_path, unet_config)
A : Union[str, Any] = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
A : Optional[int] = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
A : int = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
A : List[Any] = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
A : int = CMStochasticIterativeScheduler(**scheduler_config)
A : Optional[int] = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
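# Example invocation (script and checkpoint names are illustrative; the "cd" and
# "imagenet64" substrings route to CD_SCHEDULER_CONFIG and IMAGENET_64_UNET_CONFIG above):
# python convert_consistency_to_diffusers.py \
#     --unet_path ./cd_imagenet64_l2.pt --dump_path ./cd_imagenet64_l2 --class_cond True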
| 274
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__UpperCAmelCase = logging.get_logger(__name__)
class UpperCamelCase__ ( SequenceFeatureExtractor ):
"""simple docstring"""
UpperCAmelCase_ =["input_features", "is_longer"]
def __init__( self , _A=64 , _A=48000 , _A=480 , _A=10 , _A=1024 , _A=0.0 , _A=False , _A = 0 , _A = 14000 , _A = None , _A = "fusion" , _A = "repeatpad" , **_A , ) -> Dict:
super().__init__(
feature_size=_A , sampling_rate=_A , padding_value=_A , return_attention_mask=_A , **_A , )
SCREAMING_SNAKE_CASE_ = top_db
SCREAMING_SNAKE_CASE_ = truncation
SCREAMING_SNAKE_CASE_ = padding
SCREAMING_SNAKE_CASE_ = fft_window_size
SCREAMING_SNAKE_CASE_ = (fft_window_size >> 1) + 1
SCREAMING_SNAKE_CASE_ = hop_length
SCREAMING_SNAKE_CASE_ = max_length_s
SCREAMING_SNAKE_CASE_ = max_length_s * sampling_rate
SCREAMING_SNAKE_CASE_ = sampling_rate
SCREAMING_SNAKE_CASE_ = frequency_min
SCREAMING_SNAKE_CASE_ = frequency_max
SCREAMING_SNAKE_CASE_ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_A , min_frequency=_A , max_frequency=_A , sampling_rate=_A , norm=_A , mel_scale='''htk''' , )
SCREAMING_SNAKE_CASE_ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_A , min_frequency=_A , max_frequency=_A , sampling_rate=_A , norm='''slaney''' , mel_scale='''slaney''' , )
def _UpperCamelCase ( self ) -> Dict[str, Any]:
SCREAMING_SNAKE_CASE_ = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE_ = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def _UpperCamelCase ( self , _A , _A = None ) -> np.ndarray:
SCREAMING_SNAKE_CASE_ = spectrogram(
_A , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=_A , log_mel='''dB''' , )
return log_mel_spectrogram.T
def _UpperCamelCase ( self , _A , _A , _A ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE_ = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE_ = [0]
# randomly choose index for each part
SCREAMING_SNAKE_CASE_ = np.random.choice(ranges[0] )
SCREAMING_SNAKE_CASE_ = np.random.choice(ranges[1] )
SCREAMING_SNAKE_CASE_ = np.random.choice(ranges[2] )
SCREAMING_SNAKE_CASE_ = mel[idx_front : idx_front + chunk_frames, :]
SCREAMING_SNAKE_CASE_ = mel[idx_middle : idx_middle + chunk_frames, :]
SCREAMING_SNAKE_CASE_ = mel[idx_back : idx_back + chunk_frames, :]
SCREAMING_SNAKE_CASE_ = torch.tensor(mel[None, None, :] )
SCREAMING_SNAKE_CASE_ = torch.nn.functional.interpolate(
_A , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=_A )
SCREAMING_SNAKE_CASE_ = mel_shrink[0][0].numpy()
SCREAMING_SNAKE_CASE_ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def _UpperCamelCase ( self , _A , _A , _A , _A ) -> np.array:
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
SCREAMING_SNAKE_CASE_ = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
SCREAMING_SNAKE_CASE_ = len(_A ) - max_length
SCREAMING_SNAKE_CASE_ = np.random.randint(0 , overflow + 1 )
SCREAMING_SNAKE_CASE_ = waveform[idx : idx + max_length]
SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters )
                SCREAMING_SNAKE_CASE_ = max_length // self.hop_length + 1  # the +1 relates to how the spectrogram is computed
SCREAMING_SNAKE_CASE_ = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
SCREAMING_SNAKE_CASE_ = np.stack([mel, mel, mel, mel] , axis=0 )
SCREAMING_SNAKE_CASE_ = False
else:
SCREAMING_SNAKE_CASE_ = self._random_mel_fusion(_A , _A , _A )
SCREAMING_SNAKE_CASE_ = True
else:
raise NotImplementedError(F'''data_truncating {truncation} not implemented''' )
else:
SCREAMING_SNAKE_CASE_ = False
        # only use repeat as a new possible value for padding: you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
SCREAMING_SNAKE_CASE_ = int(max_length / len(_A ) )
SCREAMING_SNAKE_CASE_ = np.stack(np.tile(_A , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
SCREAMING_SNAKE_CASE_ = int(max_length / len(_A ) )
SCREAMING_SNAKE_CASE_ = np.stack(np.tile(_A , _A ) )
SCREAMING_SNAKE_CASE_ = np.pad(_A , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
if truncation == "fusion":
SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters )
SCREAMING_SNAKE_CASE_ = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self , _A , _A = None , _A = None , _A = None , _A = None , _A = None , **_A , ) -> BatchFeature:
SCREAMING_SNAKE_CASE_ = truncation if truncation is not None else self.truncation
SCREAMING_SNAKE_CASE_ = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
SCREAMING_SNAKE_CASE_ = isinstance(_A , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
SCREAMING_SNAKE_CASE_ = is_batched_numpy or (
isinstance(_A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE_ = [np.asarray(_A , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_A , np.ndarray ):
SCREAMING_SNAKE_CASE_ = np.asarray(_A , dtype=np.floataa )
elif isinstance(_A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE_ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE_ = [np.asarray(_A )]
# convert to mel spectrogram, truncate and pad if needed.
SCREAMING_SNAKE_CASE_ = [
self._get_input_mel(_A , max_length if max_length else self.nb_max_samples , _A , _A )
for waveform in raw_speech
]
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = []
for mel, longer in padded_inputs:
input_mel.append(_A )
is_longer.append(_A )
if truncation == "fusion" and sum(_A ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
SCREAMING_SNAKE_CASE_ = np.random.randint(0 , len(_A ) )
SCREAMING_SNAKE_CASE_ = True
if isinstance(input_mel[0] , _A ):
SCREAMING_SNAKE_CASE_ = [np.asarray(_A , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
SCREAMING_SNAKE_CASE_ = [[longer] for longer in is_longer]
SCREAMING_SNAKE_CASE_ = {'''input_features''': input_mel, '''is_longer''': is_longer}
SCREAMING_SNAKE_CASE_ = BatchFeature(_A )
if return_tensors is not None:
SCREAMING_SNAKE_CASE_ = input_features.convert_to_tensors(_A )
return input_features
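# Minimal usage sketch (illustrative only; assumes 48 kHz mono float audio and the
# defaults above; the class name is the one used in this file):
# import numpy as np
# fe = UpperCamelCase__()
# out = fe(np.zeros(480_000), sampling_rate=48_000, return_tensors='''np''')
# out['''input_features''']  # 4 stacked mel views per clip when truncation is '''fusion'''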
| 299
| 0
|
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
__A = "hf-internal-testing/tiny-random-bert"
__A = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
__A = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class lowerCamelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Union[str, Any] = cached_file(_A , _A )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(_A ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(_A , _A ) ) )
with open(os.path.join(_A , "refs" , "main" ) ) as f:
snake_case : Union[str, Any] = f.read()
self.assertEqual(_A , os.path.join(_A , "snapshots" , _A , _A ) )
self.assertTrue(os.path.isfile(_A ) )
# File is cached at the same place the second time.
snake_case : Union[str, Any] = cached_file(_A , _A )
self.assertEqual(_A , _A )
# Using a specific revision to test the full commit hash.
snake_case : List[str] = cached_file(_A , _A , revision="9b8c223" )
self.assertEqual(_A , os.path.join(_A , "snapshots" , _A , _A ) )
def lowerCamelCase_ ( self ):
"""simple docstring"""
with self.assertRaisesRegex(_A , "is not a valid model identifier" ):
snake_case : Optional[Any] = cached_file("tiny-random-bert" , _A )
with self.assertRaisesRegex(_A , "is not a valid git identifier" ):
snake_case : Optional[Any] = cached_file(_A , _A , revision="aaaa" )
with self.assertRaisesRegex(_A , "does not appear to have a file named" ):
snake_case : List[str] = cached_file(_A , "conf" )
def lowerCamelCase_ ( self ):
"""simple docstring"""
with self.assertRaisesRegex(_A , "does not appear to have a file named" ):
snake_case : Optional[int] = cached_file(_A , "conf" )
with open(os.path.join(_A , "refs" , "main" ) ) as f:
snake_case : str = f.read()
self.assertTrue(os.path.isfile(os.path.join(_A , ".no_exist" , _A , "conf" ) ) )
snake_case : int = cached_file(_A , "conf" , _raise_exceptions_for_missing_entries=_A )
self.assertIsNone(_A )
snake_case : int = cached_file(_A , "conf" , local_files_only=_A , _raise_exceptions_for_missing_entries=_A )
self.assertIsNone(_A )
snake_case : Any = mock.Mock()
snake_case : str = 500
snake_case : Tuple = {}
snake_case : List[Any] = HTTPError
snake_case : Dict = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" , return_value=_A ) as mock_head:
snake_case : Any = cached_file(_A , "conf" , _raise_exceptions_for_connection_errors=_A )
self.assertIsNone(_A )
# This check we did call the fake head request
mock_head.assert_called()
def lowerCamelCase_ ( self ):
"""simple docstring"""
self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only" , _A ) )
self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , _A ) )
self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , _A ) )
def lowerCamelCase_ ( self ):
"""simple docstring"""
self.assertIsNone(get_file_from_repo("bert-base-cased" , "ahah.txt" ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(_A , "is not a valid model identifier" ):
get_file_from_repo("bert-base-case" , _A )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(_A , "is not a valid git identifier" ):
get_file_from_repo("bert-base-cased" , _A , revision="ahaha" )
snake_case : Tuple = get_file_from_repo("bert-base-cased" , _A )
# The name is the cached name which is not very easy to test, so instead we load the content.
snake_case : List[Any] = json.loads(open(_A , "r" ).read() )
self.assertEqual(config["hidden_size"] , 768 )
def lowerCamelCase_ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case : Dict = Path(_A ) / "a.txt"
filename.touch()
self.assertEqual(get_file_from_repo(_A , "a.txt" ) , str(_A ) )
self.assertIsNone(get_file_from_repo(_A , "b.txt" ) )
| 148
|
import math
import random
def sigmoid_function( value, deriv = False ):
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value ))
# Initial Value
INITIAL_VALUE = 0.02
def forward_propagation( expected, number_propagations ):
    weight = float(2 * (random.randint(1, 1_00 )) - 1 )
    for _ in range(number_propagations ):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight )
        # How much did we miss?
        layer_1_error = (expected / 1_00) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True )
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 1_00
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase = int(input("Expected value: "))
__UpperCAmelCase = int(input("Number of propagations: "))
print(forward_propagation(expected, number_propagations))
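# Example run (stochastic, so the exact output varies between calls):
# forward_propagation(32, 450_000) -> a float close to 32.0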
| 299
| 0
|
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class _lowercase :
'''simple docstring'''
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int]=sys.maxsize ) -> str:
__lowerCAmelCase = """bilinear"""
__lowerCAmelCase = max_size
__lowerCAmelCase = short_edge_length
def __call__( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Optional[Any]:
__lowerCAmelCase = []
for img in imgs:
__lowerCAmelCase , __lowerCAmelCase = img.shape[:2]
# later: provide list and randomly choose index for resize
__lowerCAmelCase = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
__lowerCAmelCase = size * 1.0 / min(_A , _A )
if h < w:
__lowerCAmelCase , __lowerCAmelCase = size, scale * w
else:
__lowerCAmelCase , __lowerCAmelCase = scale * h, size
if max(_A , _A ) > self.max_size:
__lowerCAmelCase = self.max_size * 1.0 / max(_A , _A )
__lowerCAmelCase = newh * scale
__lowerCAmelCase = neww * scale
__lowerCAmelCase = int(neww + 0.5 )
__lowerCAmelCase = int(newh + 0.5 )
if img.dtype == np.uinta:
__lowerCAmelCase = Image.fromarray(_A )
__lowerCAmelCase = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
__lowerCAmelCase = np.asarray(_A )
else:
__lowerCAmelCase = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw
__lowerCAmelCase = nn.functional.interpolate(
_A , (newh, neww) , mode=self.interp_method , align_corners=_A ).squeeze(0 )
img_augs.append(_A )
return img_augs
class _lowercase :
'''simple docstring'''
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> int:
__lowerCAmelCase = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
__lowerCAmelCase = cfg.INPUT.FORMAT
__lowerCAmelCase = cfg.SIZE_DIVISIBILITY
__lowerCAmelCase = cfg.PAD_VALUE
__lowerCAmelCase = cfg.INPUT.MAX_SIZE_TEST
__lowerCAmelCase = cfg.MODEL.DEVICE
__lowerCAmelCase = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
__lowerCAmelCase = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
__lowerCAmelCase = lambda SCREAMING_SNAKE_CASE__ : (x - self.pixel_mean) / self.pixel_std
def a ( self : int , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> str:
__lowerCAmelCase = tuple(max(_A ) for s in zip(*[img.shape for img in images] ) )
__lowerCAmelCase = [im.shape[-2:] for im in images]
__lowerCAmelCase = [
nn.functional.pad(
_A , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(_A , _A )
]
return torch.stack(_A ), torch.tensor(_A )
def __call__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any]=False ) -> Optional[Any]:
with torch.no_grad():
if not isinstance(_A , _A ):
__lowerCAmelCase = [images]
if single_image:
assert len(_A ) == 1
for i in range(len(_A ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(_A , images.pop(_A ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
_A , torch.as_tensor(img_tensorize(images.pop(_A ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
__lowerCAmelCase = torch.tensor([im.shape[:2] for im in images] )
__lowerCAmelCase = self.aug(_A )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
__lowerCAmelCase = [self.normalizer(_A ) for x in images]
# now pad them to do the following operations
__lowerCAmelCase , __lowerCAmelCase = self.pad(_A )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
__lowerCAmelCase = torch.true_divide(_A , _A )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def _scale_box( boxes , scale_yx ):
    '''simple docstring'''
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes
def _clip_box( tensor , box_size ):
    '''simple docstring'''
    assert torch.isfinite(tensor ).all(), "Box tensor contains infinite or NaN!"
    h , w = box_size
    tensor[:, 0].clamp_(min=0 , max=w )
    tensor[:, 1].clamp_(min=0 , max=h )
    tensor[:, 2].clamp_(min=0 , max=w )
    tensor[:, 3].clamp_(min=0 , max=h )
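# Quick check of _clip_box (hypothetical box; box_size is (height, width)):
# import torch
# t = torch.tensor([[-5.0, 10.0, 900.0, 700.0]])
# _clip_box(t, (600, 800))  # t becomes [[0., 10., 800., 600.]]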
| 229
|
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 299
| 0
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class __lowerCAmelCase :
def __init__( self , lowerCAmelCase , lowerCAmelCase=2 , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=10 , lowerCAmelCase=3 , lowerCAmelCase=32 * 4 , lowerCAmelCase=32 * 6 , lowerCAmelCase=4 , lowerCAmelCase=32 , ) -> Any:
'''simple docstring'''
_lowercase =parent
_lowercase =batch_size
_lowercase =is_training
_lowercase =use_auxiliary_loss
_lowercase =num_queries
_lowercase =num_channels
_lowercase =min_size
_lowercase =max_size
_lowercase =num_labels
_lowercase =mask_feature_size
def A__ ( self ) -> str:
'''simple docstring'''
_lowercase =floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_A )
_lowercase =torch.ones([self.batch_size, self.min_size, self.max_size] , device=_A )
_lowercase =(
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_A ) > 0.5
).float()
_lowercase =(torch.rand((self.batch_size, self.num_labels) , device=_A ) > 0.5).long()
_lowercase =self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def A__ ( self ) -> Dict:
'''simple docstring'''
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def A__ ( self ) -> Any:
'''simple docstring'''
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase =self.prepare_config_and_inputs()
_lowercase ={'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def A__ ( self , lowerCAmelCase , lowerCAmelCase ) -> str:
'''simple docstring'''
_lowercase =output.encoder_hidden_states
_lowercase =output.pixel_decoder_hidden_states
_lowercase =output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_A ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_A ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_A ) , config.decoder_config.decoder_layers )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ) -> List[str]:
'''simple docstring'''
with torch.no_grad():
_lowercase =MaskFormerModel(config=_A )
model.to(_A )
model.eval()
_lowercase =model(pixel_values=_A , pixel_mask=_A )
_lowercase =model(_A , output_hidden_states=_A )
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_A , _A )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Tuple:
'''simple docstring'''
_lowercase =MaskFormerForInstanceSegmentation(config=_A )
model.to(_A )
model.eval()
def comm_check_on_output(lowerCAmelCase ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_lowercase =model(pixel_values=_A , pixel_mask=_A )
_lowercase =model(_A )
comm_check_on_output(_A )
_lowercase =model(
pixel_values=_A , pixel_mask=_A , mask_labels=_A , class_labels=_A )
comm_check_on_output(_A )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __lowerCAmelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
_a = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
_a = (
{"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
_a = False
_a = False
_a = False
_a = False
def A__ ( self ) -> List[Any]:
'''simple docstring'''
_lowercase =MaskFormerModelTester(self )
_lowercase =ConfigTester(self , config_class=_A , has_text_modality=_A )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self ) -> List[str]:
'''simple docstring'''
_lowercase , _lowercase =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_A , **_A , output_hidden_states=_A )
def A__ ( self ) -> Any:
'''simple docstring'''
_lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*_A )
@unittest.skip(reason='MaskFormer does not use inputs_embeds' )
def A__ ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip(reason='MaskFormer does not have a get_input_embeddings method' )
def A__ ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip(reason='MaskFormer is not a generative model' )
def A__ ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip(reason='MaskFormer does not use token embeddings' )
def A__ ( self ) -> str:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def A__ ( self ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def A__ ( self ) -> int:
'''simple docstring'''
pass
def A__ ( self ) -> Tuple:
'''simple docstring'''
_lowercase , _lowercase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase =model_class(_A )
_lowercase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase =[*signature.parameters.keys()]
_lowercase =['pixel_values']
self.assertListEqual(arg_names[:1] , _A )
@slow
def A__ ( self ) -> List[Any]:
'''simple docstring'''
for model_name in ["facebook/maskformer-swin-small-coco"]:
_lowercase =MaskFormerModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def A__ ( self ) -> Any:
'''simple docstring'''
_lowercase =(self.model_tester.min_size,) * 2
_lowercase ={
'pixel_values': torch.randn((2, 3, *size) , device=_A ),
'mask_labels': torch.randn((2, 10, *size) , device=_A ),
'class_labels': torch.zeros(2 , 10 , device=_A ).long(),
}
_lowercase =MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(_A )
_lowercase =model(**_A )
self.assertTrue(outputs.loss is not None )
def A__ ( self ) -> int:
'''simple docstring'''
_lowercase , _lowercase =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_A , **_A , output_hidden_states=_A )
def A__ ( self ) -> str:
'''simple docstring'''
_lowercase , _lowercase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase =model_class(_A ).to(_A )
_lowercase =model(**_A , output_attentions=_A )
self.assertTrue(outputs.attentions is not None )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
_lowercase =self.all_model_classes[1]
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase =self.model_tester.prepare_config_and_inputs()
_lowercase =model_class(_A )
model.to(_A )
model.train()
_lowercase =model(_A , mask_labels=_A , class_labels=_A ).loss
loss.backward()
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
_lowercase =self.all_model_classes[1]
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase =self.model_tester.prepare_config_and_inputs()
_lowercase =True
_lowercase =True
_lowercase =model_class(_A )
model.to(_A )
model.train()
_lowercase =model(_A , mask_labels=_A , class_labels=_A )
_lowercase =outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_lowercase =outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
_lowercase =outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_lowercase =outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_A )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
            if is_vision_available()
            else None
        )
    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))
        with torch.no_grad():
            outputs = model(**inputs)
        expected_slice = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice, atol=TOLERANCE)
        )
        expected_slice = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice, atol=TOLERANCE)
        )
        expected_slice = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice, atol=TOLERANCE)
        )
    def test_inference_instance_segmentation_head(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))
        with torch.no_grad():
            outputs = model(**inputs)
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_instance_segmentation_head_resnet_backbone(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))
        with torch.no_grad():
            outputs = model(**inputs)
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_with_segmentation_maps_and_loss(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))], segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)], return_tensors="pt",
        )
        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]
        with torch.no_grad():
            outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)
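

# Post-processing sketch added for illustration (relies on the public
# MaskFormerImageProcessor API; `outputs` and `image` are as in the tests above):
#
#     semantic_map = image_processor.post_process_semantic_segmentation(
#         outputs, target_sizes=[image.size[::-1]]
#     )[0]  # a (height, width) tensor of per-pixel class ids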
| 205
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self, max_length: int, vocab_size: int, d_model: int, dropout_rate: float, num_layers: int,
        num_heads: int, d_kv: int, d_ff: int, feed_forward_proj: str, is_decoder: bool = False,
    ):
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size, d_model)
        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate)
        t5config = T5Config(
            vocab_size=vocab_size, d_model=d_model, num_heads=num_heads, d_kv=d_kv, d_ff=d_ff, dropout_rate=dropout_rate, feed_forward_proj=feed_forward_proj, is_decoder=is_decoder, is_encoder_decoder=False,
        )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)
        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)
        x = self.dropout_pre(x)
        # invert the attention mask for the T5 blocks
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)
        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)
        return self.dropout_post(x), encoder_inputs_mask
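

# Minimal smoke test added for illustration (the hyperparameter values below are
# made up for speed; real music-spectrogram checkpoints use larger ones):
if __name__ == "__main__":
    encoder = SpectrogramNotesEncoder(
        max_length=64, vocab_size=128, d_model=32, dropout_rate=0.1, num_layers=1,
        num_heads=2, d_kv=16, d_ff=64, feed_forward_proj="gated-gelu", is_decoder=False,
    )
    tokens = torch.randint(0, 128, (2, 64))
    mask = torch.ones(2, 64, dtype=torch.long)
    encoded, out_mask = encoder(tokens, mask)
    print(encoded.shape)  # expected: torch.Size([2, 64, 32])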
| 299
| 0
|
"""simple docstring"""
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Euclidean distance computed with numpy."""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Euclidean distance computed in pure Python."""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
from timeit import timeit
print("""Without Numpy""" )
print(
timeit(
"""euclidean_distance_no_np([1, 2, 3], [4, 5, 6])""" , number=10_000 , globals=globals() , ) )
print("""With Numpy""" )
print(
timeit(
"""euclidean_distance([1, 2, 3], [4, 5, 6])""" , number=10_000 , globals=globals() , ) )
benchmark()
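    # Added sanity check: the numpy and pure-Python versions should agree
    # to floating-point precision on the benchmark inputs.
    assert abs(euclidean_distance([1, 2, 3], [4, 5, 6]) - euclidean_distance_no_np([1, 2, 3], [4, 5, 6])) < 1e-9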
| 347
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer
class Wav2Vec2Processor(ProcessorMixin):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )
            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)
        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
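

# Usage sketch, kept as comments because it needs real audio data and a
# downloaded checkpoint (the model name below is one public example):
#
#     processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#     batch = processor(raw_audio, sampling_rate=16_000, text="HELLO WORLD", return_tensors="pt")
#     # batch["input_values"] holds the audio features, batch["labels"] the tokenized text.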
| 299
| 0
|
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"""Downloading image from {url} ...""")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
with open(file_name, "wb") as fp:
fp.write(image_data)
print(f"""Done. Image saved to disk as {file_name}.""")
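
# Optional hardening sketch (illustrative, not part of the original script): some
# sites reject the default requests User-Agent, so a browser-like header helps:
#
#     headers = {"User-Agent": "Mozilla/5.0"}
#     soup = BeautifulSoup(requests.get(url, headers=headers).content, "html.parser")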
| 231
|
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
__UpperCAmelCase = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
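

# Worked example of the parsing above (hypothetical pytest summary string):
#     handle_test_results("== 2 failed, 98 passed in 3.50s ==")
# returns (2, 98, "3.50s"): the counts are read from the tokens preceding
# "failed"/"passed", and the trailing "==" shifts the time to expressions[-2].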
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False
return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
        self.thread_ts = None

    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(":")
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds
        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
@property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
@property
    def failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
F''' {self.time}.'''
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
@property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}
        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'''The following examples had failures:\n\n\n{report}\n''',
},
}
@property
    def payload(self) -> str:
        blocks = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures)
        if self.n_failures > 0:
            blocks.extend([self.category_failures])
        if self.n_failures == 0:
            blocks.append(self.no_failures)
        return json.dumps(blocks)
@staticmethod
    def error_out() -> None:
        payload = [
{
'''type''': '''section''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''There was an issue running the tests.''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
'''url''': F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
]
print('''Sending the following payload''' )
        print(json.dumps({"blocks": payload}))
        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"], text="There was an issue running the tests.", blocks=payload,
        )
    def post(self) -> None:
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))
        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."
        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"], blocks=self.payload, text=text,
        )

    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"
        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }
        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def post_reply(self) -> None:
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")
        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")
        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)
                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))
                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"], text=f"Results for {job}", blocks=blocks, thread_ts=self.thread_ts["ts"],
                )
                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}
    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return jobs
except Exception as e:
        print("Unknown error, could not fetch links.", e)
return {}
def retrieve_artifact(name):
    _artifact = {}
    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e
    return _artifact
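

# Example of the expected on-disk layout (hypothetical): for a directory
# "doc_tests_gpu_test_reports" containing stats.txt and summary_short.txt,
# retrieve_artifact("doc_tests_gpu_test_reports") returns
# {"stats": "...", "summary_short": "..."} keyed by file name stem.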
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name):
            self.name = name
            self.paths = []

        def __str__(self) -> str:
            return self.name

        def add_path(self, path):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}
    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)
        _available_artifacts[artifact_name].add_path(directory)
return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
[
("*.py", "API Examples"),
("*.md", "MD Examples"),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
v: {
"failed": [],
"failures": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")
                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line
                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)
                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break
    message = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
| 299
| 0
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
        GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")
        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor(), qformer_tokenizer=self.get_qformer_tokenizer(),
        )
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, BertTokenizerFast)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)
        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])
        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(
            list(inputs.keys()), ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(
            list(inputs.keys()), ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )
| 232
|
from __future__ import annotations
__UpperCAmelCase = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(grid, init, goal, cost, heuristic):
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i

    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
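
    # Extra minimal demo (hypothetical 2x3 grid added for illustration): the only
    # open route runs along the top row and then down the right edge.
    small_grid = [[0, 0, 0], [1, 1, 0]]
    small_heuristic = [[3, 2, 1], [2, 1, 0]]
    small_path, _ = search(small_grid, [0, 0], [1, 2], 1, small_heuristic)
    print(small_path)  # [[0, 0], [0, 1], [0, 2], [1, 2]]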
| 299
| 0
|
def upper(word: str) -> str:
    """
    Convert an entire ASCII string to uppercase.

    >>> upper("wow us")
    'WOW US'
    """
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)
if __name__ == "__main__":
from doctest import testmod
testmod()
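
    # Direct call added for illustration alongside the doctests:
    assert upper("wow us") == "WOW US"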
| 124
|
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve the linear system via Gaussian elimination with partial pivoting."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """Return the minimal-degree polynomial through (1, y_list[0]), (2, y_list[1]), ..."""
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs: Matrix = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
def question_function(variable: int) -> int:
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
print(F"""{solution() = }""")
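
    # Worked example added for illustration (from the Project Euler 101 statement):
    # for u(n) = n**3 the first incorrect terms are 1, 15 and 58, summing to 74.
    print(solution(lambda n: n**3, order=3))  # 74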
| 299
| 0
|
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__(
        self, parent, batch_size=13, num_channels=3, is_training=True, use_labels=True,
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, image_size=224,
        num_labels=1000, layer_depths=[3, 3, 6, 4], embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths, embed_dims=self.embed_dims, mlp_ratio=4, downsamples=[True, True, True, True], hidden_act="gelu", num_labels=self.num_labels, down_patch_size=3, down_stride=2, down_pad=1, drop_rate=0.0, drop_path_rate=0.0, use_layer_scale=True, layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=SwiftFormerConfig, has_text_modality=False, hidden_size=37, num_attention_heads=12, num_hidden_layers=12,
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SwiftFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @unittest.skip(reason="SwiftFormer does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO
            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_outputs_equivalence(self):
        pass
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 104
|
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
    LegacySeq2SeqDataset,
    Seq2SeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"

    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")
        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size
        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())
        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            Seq2SeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeq2SeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """A debugging utility."""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")
        self.already_saved_batch = True
        return readable_batch

    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]):
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)

    def _step(self, batch: dict) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)
        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)
            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)

    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id
    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)
        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}

    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)

    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }
    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)

    def _generative_step(self, batch: dict) -> dict:
        t0 = time.time()
        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"], attention_mask=batch["attention_mask"], use_cache=True, decoder_start_token_id=self.decoder_start_token_id, num_beams=self.eval_beams, max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, preds))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics

    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")
    def get_dataset(self, type_path) -> Seq2SeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer, type_path=type_path, n_obs=n_obs, max_target_length=max_target_length, **self.dataset_kwargs,
        )
        return dataset

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)
        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=False, num_workers=self.num_workers, sampler=sampler,
            )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset, batch_sampler=batch_sampler, collate_fn=dataset.collate_fn, num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=shuffle, num_workers=self.num_workers, sampler=None,
            )

    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)
@staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length", default=1024, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length", default=56, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length", default=142, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length", default=142, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience", type=int, default=-1, required=False, help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ),
        )
        return parser
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ ="translation"
UpperCAmelCase_ =["loss"]
UpperCAmelCase_ =["bleu"]
UpperCAmelCase_ ="bleu"
def __init__( self , _A , **_A ) -> Optional[int]:
super().__init__(_A , **_A )
SCREAMING_SNAKE_CASE_ = hparams.src_lang
SCREAMING_SNAKE_CASE_ = hparams.tgt_lang
def _UpperCamelCase ( self , _A , _A ) -> dict:
return calculate_bleu(_A , _A )
def A__ ( __lowerCamelCase, __lowerCamelCase=None ):
Path(args.output_dir ).mkdir(exist_ok=__lowerCamelCase )
check_output_dir(__lowerCamelCase, expected_items=3 )
if model is None:
if "summarization" in args.task:
SCREAMING_SNAKE_CASE_ = SummarizationModule(__lowerCamelCase )
else:
SCREAMING_SNAKE_CASE_ = TranslationModule(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith('''/tmp''' )
or str(args.output_dir ).startswith('''/var''' )
):
SCREAMING_SNAKE_CASE_ = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
SCREAMING_SNAKE_CASE_ = os.environ.get('''WANDB_PROJECT''', __lowerCamelCase )
SCREAMING_SNAKE_CASE_ = WandbLogger(name=model.output_dir.name, project=__lowerCamelCase )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
SCREAMING_SNAKE_CASE_ = WandbLogger(name=model.output_dir.name, project=F'''hf_{dataset}''' )
if args.early_stopping_patience >= 0:
SCREAMING_SNAKE_CASE_ = get_early_stopping_callback(model.val_metric, args.early_stopping_patience )
else:
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = args.val_metric == '''loss'''
SCREAMING_SNAKE_CASE_ = generic_train(
__lowerCamelCase, __lowerCamelCase, logging_callback=SeqaSeqLoggingCallback(), checkpoint_callback=get_checkpoint_callback(
args.output_dir, model.val_metric, args.save_top_k, __lowerCamelCase ), early_stopping_callback=__lowerCamelCase, logger=__lowerCamelCase, )
pickle_save(model.hparams, model.output_dir / '''hparams.pkl''' )
if not args.do_predict:
return model
SCREAMING_SNAKE_CASE_ = ''''''
SCREAMING_SNAKE_CASE_ = sorted(glob.glob(os.path.join(args.output_dir, '''*.ckpt''' ), recursive=__lowerCamelCase ) )
if checkpoints:
SCREAMING_SNAKE_CASE_ = checkpoints[-1]
SCREAMING_SNAKE_CASE_ = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
__UpperCAmelCase = pl.Trainer.add_argparse_args(parser)
__UpperCAmelCase = SummarizationModule.add_model_specific_args(parser, os.getcwd())
__UpperCAmelCase = parser.parse_args()
main(args)
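# ---------------------------------------------------------------------------
# Usage sketch (not part of the original script): a typical invocation. The
# generic flags (--data_dir, --output_dir, --model_name_or_path, --do_train,
# --do_predict) are assumed to be registered by add_generic_args, as in the
# HF seq2seq example utilities; the data directory path is hypothetical.
#
#   python finetune.py \
#       --model_name_or_path t5-small \
#       --data_dir ./cnn_dm \
#       --output_dir ./outputs \
#       --do_train --do_predict \
#       --task summarization --n_val 500 --val_metric rouge2
# ---------------------------------------------------------------------------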
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
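# Usage sketch (not part of the file above): the processor merges tokenizer and
# image-processor outputs into one dict. The checkpoint name is the public BLIP
# captioning checkpoint on the Hub; the image path is hypothetical.
# from transformers import BlipProcessor
# from PIL import Image
# processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
# inputs = processor(images=Image.open("photo.jpg"), text="a photo of", return_tensors="pt")
# list(inputs.keys())  # pixel_values plus input_ids / attention_mask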
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from collections.abc import Iterable
from typing import Any


class Node:
    def __init__(self, value: Any = None):
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self):
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)


class BinarySearchTree:
    def __init__(self, root: Node | None = None):
        self.root = root

    def __str__(self):
        return str(self.root)

    def __reassign_nodes(self, node: Node, new_children: Node | None) -> None:
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node: Node) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self) -> bool:
        return self.root is None

    def __insert(self, value) -> None:
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values) -> None:
        for value in values:
            self.__insert(value)

    def search(self, value) -> Node | None:
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node: Node | None = None) -> Node | None:
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node: Node | None = None) -> Node | None:
        if node is None:
            node = self.root
        if self.root is None:
            return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value) -> None:
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(node.left)  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node: Node | None) -> Iterable:
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None) -> Any:
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr: list, node: Node | None) -> None:
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k: int, node: Node) -> int:
        arr: list[int] = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]


def postorder(curr_node: Node | None) -> list[Node]:
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list


def binary_search_tree_example() -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
        print(t)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
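# Quick usage sketch for the tree above (values taken from the example routine):
# >>> t = BinarySearchTree()
# >>> t.insert(8, 3, 6, 1, 10, 14, 13, 4, 7)
# >>> [node.value for node in t.traversal_tree()]   # preorder walk
# [8, 3, 1, 6, 4, 7, 10, 14, 13]
# >>> t.find_kth_smallest(3, t.root)   # sorted order is 1, 3, 4, ...
# 4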
import functools


def mincost_tickets(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
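# Worked example (the classic LeetCode 983 sample): with travel days
# [1, 4, 6, 7, 8, 20] and pass costs [2, 7, 15], the optimum is a 7-day pass
# on day 1 (7) plus 1-day passes on days 8 and 20 (2 + 2), for a total of 11.
# >>> mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15])
# 11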
from __future__ import annotations

import unittest

from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFFunnelBaseModel,
        TFFunnelForMaskedLM,
        TFFunnelForMultipleChoice,
        TFFunnelForPreTraining,
        TFFunnelForQuestionAnswering,
        TFFunnelForSequenceClassification,
        TFFunnelForTokenClassification,
        TFFunnelModel,
    )


class TFFunnelModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
            vocab_size=self.vocab_size,
            block_sizes=self.block_sizes,
            num_decoder_layers=self.num_decoder_layers,
            d_model=self.d_model,
            n_head=self.n_head,
            d_head=self.d_head,
            d_inner=self.d_inner,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            activation_dropout=self.activation_dropout,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_std=self.initializer_std,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

    def create_and_check_base_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)


@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
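# Sketch (not part of the test file): these suites are normally collected by
# pytest, but they can also be run directly with the standard unittest runner:
# suite = unittest.TestLoader().loadTestsFromTestCase(TFFunnelModelTest)
# unittest.TextTestRunner(verbosity=2).run(suite)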
import gc
import math
import unittest

import torch

from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


logger = logging.get_logger(__name__)

enable_full_determinism()


class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict


class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)

        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        # by default model loading will use accelerate as `low_cpu_mem_usage=True`
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1,
            model_accelerate.config.in_channels,
            model_accelerate.config.sample_size,
            model_accelerate.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)["sample"]

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False
        )
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["sample"]

        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)

    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))


class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)

        assert image is not None, "Make sure output is not None"

    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (256, 256)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
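# Minimal standalone sketch of the behaviour these tests exercise (assumes
# diffusers and torch are installed; the config values are copied verbatim from
# the init_dict of the first test class above):
# model = UNet2DModel(
#     sample_size=32, in_channels=3, out_channels=3, layers_per_block=2,
#     block_out_channels=(32, 64),
#     down_block_types=("DownBlock2D", "AttnDownBlock2D"),
#     up_block_types=("AttnUpBlock2D", "UpBlock2D"),
# )
# sample = torch.randn(1, 3, 32, 32)
# out = model(sample, timestep=10).sample  # denoising prediction, same shape as input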
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def solution(power: int = 1000) -> int:
    """Return the sum of the digits of 2 ** power (Project Euler problem 16)."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r


if __name__ == "__main__":
    print(solution(int(input().strip())))
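# Worked example from the problem statement: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.
# >>> solution(15)
# 26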
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple

from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            else:
                toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])

    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")

    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)

        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir)
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(tmp_dir, additional_special_tokens=new_added_tokens)

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                tokenizer = tokenizer_class.from_pretrained(tmp_dir)

                self.assertTrue(tokenizer.decode([255]) == "")

    # tokenizer can be instantiated without any pretrained files, so no need for a pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have a vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so this is unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
        # and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)

    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
from ..utils import DummyObject, requires_backends


class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
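# Toy re-implementation of the idea behind DummyObject (illustrative only; the
# real helper lives in the library's utils): when optional backends are missing,
# instantiating the placeholder class fails with a clear message instead of an
# opaque ImportError at import time.
class _DummyMeta(type):
    def __call__(cls, *args, **kwargs):
        raise ImportError(f"{cls.__name__} requires the torch and scipy backends to be installed.")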
from math import ceil, sqrt


def solution(limit: int = 1_000_000) -> int:
    """Count the square laminae that can be formed using up to `limit` tiles
    (Project Euler problem 173)."""
    answer = 0

    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole must have the same parity as the outer square
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer


if __name__ == "__main__":
    print(f"{solution() = }")
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _A , _A=3 , _A=32 , _A=3 , _A=10 , _A=[10, 20, 30, 40] , _A=[1, 1, 2, 1] , _A=True , _A=True , _A="relu" , _A=3 , _A=None , ) -> Tuple:
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = embeddings_size
SCREAMING_SNAKE_CASE_ = hidden_sizes
SCREAMING_SNAKE_CASE_ = depths
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = num_labels
SCREAMING_SNAKE_CASE_ = scope
SCREAMING_SNAKE_CASE_ = len(_A )
def _UpperCamelCase ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ = self.get_config()
return config, pixel_values
def _UpperCamelCase ( self ) -> Optional[Any]:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def _UpperCamelCase ( self , _A , _A ) -> int:
SCREAMING_SNAKE_CASE_ = FlaxRegNetModel(config=_A )
SCREAMING_SNAKE_CASE_ = model(_A )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _UpperCamelCase ( self , _A , _A ) -> Any:
SCREAMING_SNAKE_CASE_ = self.num_labels
SCREAMING_SNAKE_CASE_ = FlaxRegNetForImageClassification(config=_A )
SCREAMING_SNAKE_CASE_ = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self ) -> Any:
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = config_and_inputs
SCREAMING_SNAKE_CASE_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ =(FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
UpperCAmelCase_ =False
UpperCAmelCase_ =False
UpperCAmelCase_ =False
def _UpperCamelCase ( self ) -> None:
SCREAMING_SNAKE_CASE_ = FlaxRegNetModelTester(self )
SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_A , has_text_modality=_A )
def _UpperCamelCase ( self ) -> Union[str, Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCamelCase ( self ) -> str:
return
def _UpperCamelCase ( self ) -> List[str]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def _UpperCamelCase ( self ) -> int:
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def _UpperCamelCase ( self ) -> Dict:
pass
def _UpperCamelCase ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_A )
SCREAMING_SNAKE_CASE_ = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _A )
def _UpperCamelCase ( self ) -> Any:
def check_hidden_states_output(_A , _A , _A ):
SCREAMING_SNAKE_CASE_ = model_class(_A )
SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(_A , _A ) )
SCREAMING_SNAKE_CASE_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE_ = self.model_tester.num_stages
self.assertEqual(len(_A ) , expected_num_stages + 1 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = True
check_hidden_states_output(_A , _A , _A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_ = True
check_hidden_states_output(_A , _A , _A )
def _UpperCamelCase ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
SCREAMING_SNAKE_CASE_ = self._prepare_for_class(_A , _A )
SCREAMING_SNAKE_CASE_ = model_class(_A )
@jax.jit
def model_jitted(_A , **_A ):
return model(pixel_values=_A , **_A )
with self.subTest('''JIT Enabled''' ):
SCREAMING_SNAKE_CASE_ = model_jitted(**_A ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
SCREAMING_SNAKE_CASE_ = model_jitted(**_A ).to_tuple()
self.assertEqual(len(_A ) , len(_A ) )
for jitted_output, output in zip(_A , _A ):
self.assertEqual(jitted_output.shape , output.shape )
def A__ ( ):
SCREAMING_SNAKE_CASE_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_flax
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _UpperCamelCase ( self ) -> Optional[int]:
return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None
@slow
def _UpperCamelCase ( self ) -> int:
SCREAMING_SNAKE_CASE_ = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' )
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(images=_A , return_tensors='''np''' )
SCREAMING_SNAKE_CASE_ = model(**_A )
# verify the logits
SCREAMING_SNAKE_CASE_ = (1, 1000)
self.assertEqual(outputs.logits.shape , _A )
SCREAMING_SNAKE_CASE_ = jnp.array([-0.4180, -1.5051, -3.4836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , _A , atol=1E-4 ) )
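# Sanity check of the shape assertion above: RegNet's stem plus its four
# stages downsample by a total stride of 32, so the final feature map side
# is image_size // 32 (e.g. a 224-pixel input leaves a 7x7 map).
assert 224 // 32 == 7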
| 299
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : List[str] = {
'facebook/deit-base-distilled-patch16-224': (
'https://huggingface.co/facebook/deit-base-distilled-patch16-224/resolve/main/config.json'
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class __A (__SCREAMING_SNAKE_CASE):
'''simple docstring'''
__lowercase: Union[str, Any] = """deit"""
def __init__( self : int , UpperCAmelCase_ : List[str]=768 , UpperCAmelCase_ : List[Any]=12 , UpperCAmelCase_ : List[str]=12 , UpperCAmelCase_ : Tuple=3_072 , UpperCAmelCase_ : Dict="gelu" , UpperCAmelCase_ : Tuple=0.0 , UpperCAmelCase_ : str=0.0 , UpperCAmelCase_ : Any=0.02 , UpperCAmelCase_ : Any=1E-12 , UpperCAmelCase_ : Optional[Any]=224 , UpperCAmelCase_ : Union[str, Any]=16 , UpperCAmelCase_ : Tuple=3 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : List[Any]=16 , **UpperCAmelCase_ : Any , ) ->Dict:
"""simple docstring"""
super().__init__(**_A )
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = num_channels
snake_case_ = qkv_bias
snake_case_ = encoder_stride
class __A (__SCREAMING_SNAKE_CASE):
'''simple docstring'''
__lowercase: Union[str, Any] = version.parse("""1.11""")
@property
def lowerCAmelCase ( self : List[Any] ) ->Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase ( self : Dict ) ->float:
"""simple docstring"""
return 1E-4
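# Patch arithmetic implied by the defaults above: a 224x224 image cut into
# 16x16 patches yields (224 // 16) ** 2 = 196 patch tokens; DeiT prepends a
# class token and a distillation token to that sequence.
assert (224 // 16) ** 2 == 196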
| 347
|
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def A__ ( __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = int(number**0.5 )
return number == sq * sq
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
SCREAMING_SNAKE_CASE_ = x_den * y_den * z_den
SCREAMING_SNAKE_CASE_ = gcd(__lowerCamelCase, __lowerCamelCase )
top //= hcf
bottom //= hcf
return top, bottom
def A__ ( __lowerCamelCase = 35 ):
SCREAMING_SNAKE_CASE_ = set()
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = Fraction(0 )
SCREAMING_SNAKE_CASE_ = 42
for x_num in range(1, order + 1 ):
for x_den in range(x_num + 1, order + 1 ):
for y_num in range(1, order + 1 ):
for y_den in range(y_num + 1, order + 1 ):
# n=1
SCREAMING_SNAKE_CASE_ = x_num * y_den + x_den * y_num
SCREAMING_SNAKE_CASE_ = x_den * y_den
SCREAMING_SNAKE_CASE_ = gcd(__lowerCamelCase, __lowerCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
SCREAMING_SNAKE_CASE_ = add_three(
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
unique_s.add(__lowerCamelCase )
# n=2
SCREAMING_SNAKE_CASE_ = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
SCREAMING_SNAKE_CASE_ = x_den * x_den * y_den * y_den
if is_sq(__lowerCamelCase ) and is_sq(__lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = int(sqrt(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE_ = int(sqrt(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE_ = gcd(__lowerCamelCase, __lowerCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
SCREAMING_SNAKE_CASE_ = add_three(
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
unique_s.add(__lowerCamelCase )
# n=-1
SCREAMING_SNAKE_CASE_ = x_num * y_num
SCREAMING_SNAKE_CASE_ = x_den * y_num + x_num * y_den
SCREAMING_SNAKE_CASE_ = gcd(__lowerCamelCase, __lowerCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
SCREAMING_SNAKE_CASE_ = add_three(
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
unique_s.add(__lowerCamelCase )
# n=-2
SCREAMING_SNAKE_CASE_ = x_num * x_num * y_num * y_num
SCREAMING_SNAKE_CASE_ = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(__lowerCamelCase ) and is_sq(__lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = int(sqrt(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE_ = int(sqrt(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE_ = gcd(__lowerCamelCase, __lowerCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
SCREAMING_SNAKE_CASE_ = add_three(
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
unique_s.add(__lowerCamelCase )
for num, den in unique_s:
total += Fraction(__lowerCamelCase, __lowerCamelCase )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F"""{solution() = }""")
| 299
| 0
|
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def lowerCamelCase__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] = None , __lowerCAmelCase : List[Any] = None , __lowerCAmelCase : Optional[int] = None , ):
"""simple docstring"""
if config_name_or_path is None:
lowerCAmelCase_ = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
if generator_tokenizer_name_or_path is None:
lowerCAmelCase_ = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
lowerCAmelCase_ = question_encoder_name_or_path
lowerCAmelCase_ = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
# Save model.
lowerCAmelCase_ = RagConfig.from_pretrained(__lowerCamelCase )
lowerCAmelCase_ = AutoConfig.from_pretrained(__lowerCamelCase )
lowerCAmelCase_ = AutoConfig.from_pretrained(__lowerCamelCase )
lowerCAmelCase_ = gen_config
lowerCAmelCase_ = question_encoder_config
lowerCAmelCase_ = model_class.from_pretrained_question_encoder_generator(
__lowerCamelCase , __lowerCamelCase , config=__lowerCamelCase )
rag_model.save_pretrained(__lowerCamelCase )
# Sanity check.
model_class.from_pretrained(__lowerCamelCase )
# Save tokenizers.
lowerCAmelCase_ = AutoTokenizer.from_pretrained(__lowerCamelCase )
gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" )
lowerCAmelCase_ = AutoTokenizer.from_pretrained(__lowerCamelCase )
question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument(
"--model_type",
choices=["rag_sequence", "rag_token"],
required=True,
type=str,
help="RAG model type: rag_sequence, rag_token",
)
parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.")
parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier")
parser.add_argument(
"--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier"
)
parser.add_argument(
"--generator_tokenizer_name_or_path",
type=str,
help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``",
)
parser.add_argument(
"--question_encoder_tokenizer_name_or_path",
type=str,
help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``",
)
parser.add_argument(
"--config_name_or_path",
type=str,
help=(
"Identifier of the model config to use, if not provided, resolves to a base config for a given"
" ``model_type``"
),
)
_A = parser.parse_args()
_A = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
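# Equivalent direct call to the function above (the model identifiers and
# output path below are placeholders, not values taken from this script):
# consolidate(
#     "rag_sequence",
#     "facebook/bart-large",
#     "facebook/dpr-question_encoder-single-nq-base",
#     Path("./rag-consolidated"),
# )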
| 231
|
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
@register_to_config
def __init__( self , _A , _A = None , _A = None ) -> Optional[Any]:
super().__init__()
SCREAMING_SNAKE_CASE_ = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
SCREAMING_SNAKE_CASE_ = torch.zeros(_A , _A )
else:
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = torch.nn.Parameter(_A )
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ =42
UpperCAmelCase_ =42
UpperCAmelCase_ =42
UpperCAmelCase_ =42
UpperCAmelCase_ =42
UpperCAmelCase_ =42
def __init__( self , _A , _A , _A , _A , _A , _A , ) -> Any:
super().__init__()
self.register_modules(
vqvae=_A , transformer=_A , text_encoder=_A , tokenizer=_A , scheduler=_A , learned_classifier_free_sampling_embeddings=_A , )
def _UpperCamelCase ( self , _A , _A , _A ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = len(_A ) if isinstance(_A , _A ) else 1
# get prompt text embeddings
SCREAMING_SNAKE_CASE_ = self.tokenizer(
_A , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE_ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
SCREAMING_SNAKE_CASE_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
SCREAMING_SNAKE_CASE_ = text_input_ids[:, : self.tokenizer.model_max_length]
SCREAMING_SNAKE_CASE_ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
SCREAMING_SNAKE_CASE_ = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=_A )
# duplicate text embeddings for each generation per prompt
SCREAMING_SNAKE_CASE_ = prompt_embeds.repeat_interleave(_A , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
SCREAMING_SNAKE_CASE_ = self.learned_classifier_free_sampling_embeddings.embeddings
SCREAMING_SNAKE_CASE_ = negative_prompt_embeds.unsqueeze(0 ).repeat(_A , 1 , 1 )
else:
SCREAMING_SNAKE_CASE_ = [''''''] * batch_size
SCREAMING_SNAKE_CASE_ = text_input_ids.shape[-1]
SCREAMING_SNAKE_CASE_ = self.tokenizer(
_A , padding='''max_length''' , max_length=_A , truncation=_A , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
SCREAMING_SNAKE_CASE_ = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=_A )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
SCREAMING_SNAKE_CASE_ = negative_prompt_embeds.shape[1]
SCREAMING_SNAKE_CASE_ = negative_prompt_embeds.repeat(1 , _A , 1 )
SCREAMING_SNAKE_CASE_ = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _A , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
SCREAMING_SNAKE_CASE_ = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self , _A , _A = 100 , _A = 5.0 , _A = 1.0 , _A = 1 , _A = None , _A = None , _A = "pil" , _A = True , _A = None , _A = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
if isinstance(_A , _A ):
SCREAMING_SNAKE_CASE_ = 1
elif isinstance(_A , _A ):
SCREAMING_SNAKE_CASE_ = len(_A )
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(_A )}''' )
SCREAMING_SNAKE_CASE_ = batch_size * num_images_per_prompt
SCREAMING_SNAKE_CASE_ = guidance_scale > 1.0
SCREAMING_SNAKE_CASE_ = self._encode_prompt(_A , _A , _A )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_A , _A ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(_A )}.''' )
# get the initial completely masked latents unless the user supplied it
SCREAMING_SNAKE_CASE_ = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
SCREAMING_SNAKE_CASE_ = self.transformer.num_vector_embeds - 1
SCREAMING_SNAKE_CASE_ = torch.full(_A , _A ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
'''Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0,'''
F''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
SCREAMING_SNAKE_CASE_ = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(_A , device=self.device )
SCREAMING_SNAKE_CASE_ = self.scheduler.timesteps.to(self.device )
SCREAMING_SNAKE_CASE_ = latents
for i, t in enumerate(self.progress_bar(_A ) ):
# expand the sample if we are doing classifier free guidance
SCREAMING_SNAKE_CASE_ = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
SCREAMING_SNAKE_CASE_ = self.transformer(_A , encoder_hidden_states=_A , timestep=_A ).sample
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = model_output.chunk(2 )
SCREAMING_SNAKE_CASE_ = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(_A , dim=1 , keepdim=_A )
SCREAMING_SNAKE_CASE_ = self.truncate(_A , _A )
# remove `log(0)`'s (`-inf`s)
SCREAMING_SNAKE_CASE_ = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE_ = self.scheduler.step(_A , timestep=_A , sample=_A , generator=_A ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_A , _A , _A )
SCREAMING_SNAKE_CASE_ = self.vqvae.config.vq_embed_dim
SCREAMING_SNAKE_CASE_ = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
SCREAMING_SNAKE_CASE_ = self.vqvae.quantize.get_codebook_entry(_A , shape=_A )
SCREAMING_SNAKE_CASE_ = self.vqvae.decode(_A , force_not_quantize=_A ).sample
SCREAMING_SNAKE_CASE_ = (image / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE_ = self.numpy_to_pil(_A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_A )
def _UpperCamelCase ( self , _A , _A ) -> torch.FloatTensor:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = torch.sort(_A , 1 , descending=_A )
SCREAMING_SNAKE_CASE_ = torch.exp(_A )
SCREAMING_SNAKE_CASE_ = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
SCREAMING_SNAKE_CASE_ = torch.full_like(keep_mask[:, 0:1, :] , _A )
SCREAMING_SNAKE_CASE_ = torch.cat((all_true, keep_mask) , dim=1 )
SCREAMING_SNAKE_CASE_ = keep_mask[:, :-1, :]
SCREAMING_SNAKE_CASE_ = keep_mask.gather(1 , indices.argsort(1 ) )
SCREAMING_SNAKE_CASE_ = log_p_x_0.clone()
SCREAMING_SNAKE_CASE_ = -torch.inf # -inf = log(0)
return rv
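# Toy run of the truncation logic above (a sketch assuming torch is
# installed): a class is kept while the cumulative mass of the strictly
# more probable classes stays below the truncation rate, and the single
# most probable class is always retained.
import torch

log_p = torch.log(torch.tensor([[[0.6], [0.3], [0.1]]]))  # (batch, classes, pixels)
sorted_lp, idx = torch.sort(log_p, 1, descending=True)
keep = torch.exp(sorted_lp).cumsum(dim=1) < 0.8
keep = torch.cat([torch.full_like(keep[:, :1, :], True), keep[:, :-1, :]], dim=1)
keep = keep.gather(1, idx.argsort(1))
assert keep.squeeze().tolist() == [True, True, False]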
| 299
| 0
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
def _lowerCamelCase ( self :Dict ) -> List[str]:
__UpperCamelCase : int = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
__UpperCamelCase : Optional[Any] = get_activation("gelu" )
self.assertTrue(torch.allclose(gelu_python(_A ) , torch_builtin(_A ) ) )
self.assertFalse(torch.allclose(gelu_python(_A ) , gelu_new(_A ) ) )
def _lowerCamelCase ( self :List[str] ) -> Union[str, Any]:
__UpperCamelCase : List[Any] = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
__UpperCamelCase : Tuple = get_activation("gelu" )
__UpperCamelCase : Optional[Any] = get_activation("gelu_10" )
__UpperCamelCase : str = torch_builtin(_A )
__UpperCamelCase : Tuple = geluaa(_A )
__UpperCamelCase : Any = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(_A ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def _lowerCamelCase ( self :Any ) -> int:
get_activation("gelu" )
get_activation("gelu_10" )
get_activation("gelu_fast" )
get_activation("gelu_new" )
get_activation("gelu_python" )
get_activation("gelu_pytorch_tanh" )
get_activation("linear" )
get_activation("mish" )
get_activation("quick_gelu" )
get_activation("relu" )
get_activation("sigmoid" )
get_activation("silu" )
get_activation("swish" )
get_activation("tanh" )
with self.assertRaises(_A ):
get_activation("bogus" )
with self.assertRaises(_A ):
get_activation(_A )
def _lowerCamelCase ( self :str ) -> Dict:
__UpperCamelCase : Any = get_activation("gelu" )
__UpperCamelCase : List[str] = 1
__UpperCamelCase : List[str] = get_activation("gelu" )
self.assertEqual(acta.a , 1 )
with self.assertRaises(_A ):
__UpperCamelCase : List[Any] = acta.a
| 232
|
def A__ ( __lowerCamelCase ):
if not isinstance(__lowerCamelCase, __lowerCamelCase ):
raise ValueError('''Input must be an integer''' )
if input_num <= 0:
raise ValueError('''Input must be positive''' )
return sum(
divisor for divisor in range(1, input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
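# Worked examples: the proper divisors of 6 are 1, 2, 3 (sum 6) and those
# of 28 are 1, 2, 4, 7, 14 (sum 28), so both numbers are perfect.
assert A__(6) == 6
assert A__(28) == 28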
| 299
| 0
|
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
lowerCamelCase : Union[str, Any] = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
lowerCamelCase : List[Any] = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
lowerCamelCase : Tuple = re.compile(r'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
lowerCamelCase : Dict = re.compile(r'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
lowerCamelCase : Optional[Any] = re.compile(r'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
lowerCamelCase : List[str] = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
('image-to-text', 'MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Tuple:
snake_case : List[Any] = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" ,__lowerCamelCase )
return [m.group(0 ) for m in matches]
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]:
snake_case : Any = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
snake_case : Union[str, Any] = {
config.replace("""Config""" ,"""""" ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
snake_case : Any = collections.defaultdict(__lowerCamelCase )
snake_case : str = collections.defaultdict(__lowerCamelCase )
snake_case : str = collections.defaultdict(__lowerCamelCase )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(__lowerCamelCase ):
snake_case : Union[str, Any] = None
if _re_tf_models.match(__lowerCamelCase ) is not None:
snake_case : Dict = tf_models
snake_case : Optional[int] = _re_tf_models.match(__lowerCamelCase ).groups()[0]
elif _re_flax_models.match(__lowerCamelCase ) is not None:
snake_case : str = flax_models
snake_case : Any = _re_flax_models.match(__lowerCamelCase ).groups()[0]
elif _re_pt_models.match(__lowerCamelCase ) is not None:
snake_case : Any = pt_models
snake_case : List[Any] = _re_pt_models.match(__lowerCamelCase ).groups()[0]
if lookup_dict is not None:
while len(__lowerCamelCase ) > 0:
if attr_name in model_prefix_to_model_type:
snake_case : int = True
break
# Try again after removing the last word in the name
snake_case : Any = """""".join(camel_case_split(__lowerCamelCase )[:-1] )
snake_case : Optional[Any] = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
snake_case : Optional[int] = list(__lowerCamelCase )
all_models.sort()
snake_case : Optional[Any] = {"""model_type""": all_models}
snake_case : Optional[Any] = [pt_models[t] for t in all_models]
snake_case : str = [tf_models[t] for t in all_models]
snake_case : List[Any] = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to pick the right processing class for each model.
snake_case : Any = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
snake_case : str = """AutoProcessor"""
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
snake_case : Optional[int] = """AutoTokenizer"""
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
snake_case : str = """AutoFeatureExtractor"""
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
snake_case : Any = """AutoTokenizer"""
snake_case : Tuple = [processors[t] for t in all_models]
return pd.DataFrame(__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Tuple:
snake_case : Dict = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
snake_case : Any = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""]
snake_case : Any = [auto_class, f"""TF_{auto_class}""", f"""Flax_{auto_class}"""]
# Loop through all three frameworks
for module, cls, mapping in zip(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ):
# The type of pipeline may not exist in this framework
if not hasattr(__lowerCamelCase ,__lowerCamelCase ):
continue
# First extract all model_names
snake_case : Union[str, Any] = []
for name in getattr(__lowerCamelCase ,__lowerCamelCase ).values():
if isinstance(__lowerCamelCase ,__lowerCamelCase ):
model_names.append(__lowerCamelCase )
else:
model_names.extend(list(__lowerCamelCase ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> Optional[Any]:
snake_case : Tuple = get_frameworks_table()
snake_case : Tuple = Dataset.from_pandas(__lowerCamelCase )
snake_case : List[Any] = hf_hub_download(
"""huggingface/transformers-metadata""" ,"""pipeline_tags.json""" ,repo_type="""dataset""" ,token=__lowerCamelCase )
snake_case : int = Dataset.from_json(__lowerCamelCase )
snake_case : List[Any] = {
tags_dataset[i]["""model_class"""]: (tags_dataset[i]["""pipeline_tag"""], tags_dataset[i]["""auto_class"""])
for i in range(len(__lowerCamelCase ) )
}
snake_case : List[Any] = update_pipeline_and_auto_class_table(__lowerCamelCase )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
snake_case : str = sorted(table.keys() )
snake_case : Optional[Any] = pd.DataFrame(
{
"""model_class""": model_classes,
"""pipeline_tag""": [table[m][0] for m in model_classes],
"""auto_class""": [table[m][1] for m in model_classes],
} )
snake_case : Tuple = Dataset.from_pandas(__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(__lowerCamelCase ,"""frameworks.json""" ) )
tags_dataset.to_json(os.path.join(__lowerCamelCase ,"""pipeline_tags.json""" ) )
if commit_sha is not None:
snake_case : Any = (
f"""Update with commit {commit_sha}\n\nSee: """
f"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
)
else:
snake_case : Union[str, Any] = """Update"""
upload_folder(
repo_id="""huggingface/transformers-metadata""" ,folder_path=__lowerCamelCase ,repo_type="""dataset""" ,token=__lowerCamelCase ,commit_message=__lowerCamelCase ,)
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
snake_case : Any = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
snake_case : List[Any] = transformers_module.pipelines.SUPPORTED_TASKS
snake_case : Dict = []
for key in pipeline_tasks:
if key not in in_table:
snake_case : Dict = pipeline_tasks[key]["""pt"""]
if isinstance(__lowerCamelCase ,(list, tuple) ):
snake_case : Union[str, Any] = model[0]
snake_case : Any = model.__name__
if model not in in_table.values():
missing.append(__lowerCamelCase )
if len(__lowerCamelCase ) > 0:
snake_case : Union[str, Any] = """, """.join(__lowerCamelCase )
raise ValueError(
"""The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside """
f"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
lowerCamelCase : str = argparse.ArgumentParser()
parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
lowerCamelCase : List[str] = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
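# Self-contained sketch of the CamelCase splitter defined above (the
# helper's original name in the repo is `camel_case_split`; the names used
# here are ours):
import re

_word_re = re.compile(r".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)")

def camel_case_split(identifier: str) -> list[str]:
    # Each lazy match ends at a lower->upper or upper->UpperLower boundary.
    return [m.group(0) for m in _word_re.finditer(identifier)]

assert camel_case_split("FlaxRegNetModel") == ["Flax", "Reg", "Net", "Model"]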
| 124
|
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__UpperCAmelCase = "pt"
elif is_tf_available():
__UpperCAmelCase = "tf"
else:
__UpperCAmelCase = "jax"
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ =ByTaTokenizer
UpperCAmelCase_ =False
def _UpperCamelCase ( self ) -> Tuple:
super().setUp()
SCREAMING_SNAKE_CASE_ = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _UpperCamelCase ( self ) -> List[str]:
return ByTaTokenizer.from_pretrained('''google/byt5-small''' )
def _UpperCamelCase ( self , **_A ) -> ByTaTokenizer:
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_A )
def _UpperCamelCase ( self , _A , _A=False , _A=20 , _A=5 ) -> Tuple[str, list]:
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for ByT5 because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
SCREAMING_SNAKE_CASE_ = []
for i in range(len(_A ) ):
try:
SCREAMING_SNAKE_CASE_ = tokenizer.decode([i] , clean_up_tokenization_spaces=_A )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
SCREAMING_SNAKE_CASE_ = list(filter(lambda _A : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , _A ) )
SCREAMING_SNAKE_CASE_ = list(filter(lambda _A : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_A ) , _A ) )
if max_length is not None and len(_A ) > max_length:
SCREAMING_SNAKE_CASE_ = toks[:max_length]
if min_length is not None and len(_A ) < min_length and len(_A ) > 0:
while len(_A ) < min_length:
SCREAMING_SNAKE_CASE_ = toks + toks
# toks_str = [t[1] for t in toks]
SCREAMING_SNAKE_CASE_ = [t[0] for t in toks]
# Ensure consistency
SCREAMING_SNAKE_CASE_ = tokenizer.decode(_A , clean_up_tokenization_spaces=_A )
if " " not in output_txt and len(_A ) > 1:
SCREAMING_SNAKE_CASE_ = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_A )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_A )
)
if with_prefix_space:
SCREAMING_SNAKE_CASE_ = ''' ''' + output_txt
SCREAMING_SNAKE_CASE_ = tokenizer.encode(_A , add_special_tokens=_A )
return output_txt, output_ids
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = tokenizer(['''hi</s>''', '''I went to the gym</s>''', '''</s>'''] )
SCREAMING_SNAKE_CASE_ = tokenizer(['''hi''', '''I went to the gym''', ''''''] )
self.assertListEqual(batch_with_eos_added['''input_ids'''] , batch_without_eos_added['''input_ids'''] )
def _UpperCamelCase ( self ) -> Any:
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = '''Unicode €.'''
SCREAMING_SNAKE_CASE_ = tokenizer(_A )
SCREAMING_SNAKE_CASE_ = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['''input_ids'''] , _A )
# decoding
SCREAMING_SNAKE_CASE_ = tokenizer.decode(_A )
self.assertEqual(_A , '''Unicode €.</s>''' )
SCREAMING_SNAKE_CASE_ = tokenizer('''e è é ê ë''' )
SCREAMING_SNAKE_CASE_ = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['''input_ids'''] , _A )
# decoding
SCREAMING_SNAKE_CASE_ = tokenizer.decode(_A )
self.assertEqual(_A , '''e è é ê ë</s>''' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''e è é ê ë</s>''' )
def _UpperCamelCase ( self ) -> List[str]:
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
SCREAMING_SNAKE_CASE_ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
SCREAMING_SNAKE_CASE_ = tokenizer(_A , padding=_A , return_tensors=_A )
self.assertIsInstance(_A , _A )
if FRAMEWORK != "jax":
SCREAMING_SNAKE_CASE_ = list(batch.input_ids.numpy()[0] )
else:
SCREAMING_SNAKE_CASE_ = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_A , _A )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
SCREAMING_SNAKE_CASE_ = tokenizer(_A , padding=_A , return_tensors=_A )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , _A )
self.assertIn('''attention_mask''' , _A )
self.assertNotIn('''decoder_input_ids''' , _A )
self.assertNotIn('''decoder_attention_mask''' , _A )
def _UpperCamelCase ( self ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = [
'''Summary of the text.''',
'''Another summary.''',
]
SCREAMING_SNAKE_CASE_ = tokenizer(
text_target=_A , max_length=32 , padding='''max_length''' , truncation=_A , return_tensors=_A )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
def _UpperCamelCase ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = ['''A long paragraph for summarization. </s>''']
SCREAMING_SNAKE_CASE_ = ['''Summary of the text. </s>''']
# fmt: off
SCREAMING_SNAKE_CASE_ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
SCREAMING_SNAKE_CASE_ = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
SCREAMING_SNAKE_CASE_ = tokenizer(_A , text_target=_A )
self.assertEqual(_A , batch['''input_ids'''][0] )
self.assertEqual(_A , batch['''labels'''][0] )
def _UpperCamelCase ( self ) -> Dict:
# safety check on max_len default value so we are sure the test works
SCREAMING_SNAKE_CASE_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
SCREAMING_SNAKE_CASE_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = ''' He is very happy, UNwant\u00E9d,running'''
SCREAMING_SNAKE_CASE_ = tokenizer.encode(_A , add_special_tokens=_A )
tokenizer.save_pretrained(_A )
SCREAMING_SNAKE_CASE_ = tokenizer.__class__.from_pretrained(_A )
SCREAMING_SNAKE_CASE_ = after_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
shutil.rmtree(_A )
SCREAMING_SNAKE_CASE_ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''] )
SCREAMING_SNAKE_CASE_ = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''' )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
SCREAMING_SNAKE_CASE_ = tokenizer.encode(_A , add_special_tokens=_A )
tokenizer.save_pretrained(_A )
SCREAMING_SNAKE_CASE_ = tokenizer.__class__.from_pretrained(_A )
SCREAMING_SNAKE_CASE_ = after_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
SCREAMING_SNAKE_CASE_ = tokenizer.__class__.from_pretrained(_A , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_A )
def _UpperCamelCase ( self ) -> int:
SCREAMING_SNAKE_CASE_ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_A )
with open(os.path.join(_A , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
SCREAMING_SNAKE_CASE_ = json.load(_A )
with open(os.path.join(_A , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
SCREAMING_SNAKE_CASE_ = json.load(_A )
SCREAMING_SNAKE_CASE_ = [F'''<extra_id_{i}>''' for i in range(125 )]
SCREAMING_SNAKE_CASE_ = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
SCREAMING_SNAKE_CASE_ = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(_A , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(_A , _A )
with open(os.path.join(_A , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(_A , _A )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
SCREAMING_SNAKE_CASE_ = tokenizer_class.from_pretrained(
_A , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
SCREAMING_SNAKE_CASE_ = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=_A )]
SCREAMING_SNAKE_CASE_ = tokenizer_class.from_pretrained(
_A , additional_special_tokens=_A , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE_ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_A )
SCREAMING_SNAKE_CASE_ = tokenizer_class.from_pretrained(_A )
self.assertTrue(tokenizer.decode([255] ) == '''''' )
def _UpperCamelCase ( self ) -> int:
pass
def _UpperCamelCase ( self ) -> Any:
pass
def _UpperCamelCase ( self ) -> Any:
pass
def _UpperCamelCase ( self ) -> Optional[int]:
pass
def _UpperCamelCase ( self ) -> Union[str, Any]:
# The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
# and special added tokens as tokens
SCREAMING_SNAKE_CASE_ = self.get_tokenizers(fast=_A , do_lower_case=_A )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
SCREAMING_SNAKE_CASE_ = ['''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''x''', '''t''', '''</s>''']
SCREAMING_SNAKE_CASE_ = tokenizer.convert_tokens_to_string(_A )
self.assertIsInstance(_A , _A )
def _UpperCamelCase ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
SCREAMING_SNAKE_CASE_ = [
'''bos_token''',
'''eos_token''',
'''unk_token''',
'''sep_token''',
'''pad_token''',
'''cls_token''',
'''mask_token''',
]
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = tokenizer.convert_ids_to_tokens(
_A , skip_special_tokens=_A )
for attr in attributes_list:
setattr(_A , attr + '''_id''' , _A )
self.assertEqual(getattr(_A , _A ) , _A )
self.assertEqual(getattr(_A , attr + '''_id''' ) , _A )
setattr(_A , attr + '''_id''' , _A )
self.assertEqual(getattr(_A , _A ) , _A )
self.assertEqual(getattr(_A , attr + '''_id''' ) , _A )
setattr(_A , '''additional_special_tokens_ids''' , [] )
self.assertListEqual(getattr(_A , '''additional_special_tokens''' ) , [] )
self.assertListEqual(getattr(_A , '''additional_special_tokens_ids''' ) , [] )
setattr(_A , '''additional_special_tokens_ids''' , [token_id_to_test_setters] )
self.assertListEqual(getattr(_A , '''additional_special_tokens''' ) , [token_to_test_setters] )
self.assertListEqual(getattr(_A , '''additional_special_tokens_ids''' ) , [token_id_to_test_setters] )
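# Byte-level id arithmetic behind the expected values above: ByT5 maps each
# UTF-8 byte b to token id b + 3 (ids 0-2 are reserved for pad, eos, unk),
# so 'U' (byte 85) appears as 88 and ' ' (byte 32) as 35.
expected = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49]
assert [b + 3 for b in "Unicode €.".encode("utf-8")] == expected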
| 299
| 0
|
'''simple docstring'''
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class lowercase_ (nn.Module ):
"""simple docstring"""
def __init__( self : Tuple ,lowercase__ : Any ,lowercase__ : str ,lowercase__ : Union[str, Any] ,lowercase__ : str=0.0 ,lowercase__ : Dict = None ,lowercase__ : str = "geglu" ,lowercase__ : List[Any] = None ,lowercase__ : Optional[int] = False ,lowercase__ : List[str] = False ,lowercase__ : Optional[Any] = False ,lowercase__ : Any = False ,lowercase__ : List[str] = True ,lowercase__ : Optional[int] = "layer_norm" ,lowercase__ : Optional[int] = False ,):
super().__init__()
__lowercase = only_cross_attention
__lowercase = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm_zero'''
__lowercase = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm'''
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
F"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
F" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}." )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
__lowercase = AdaLayerNorm(_A ,_A )
elif self.use_ada_layer_norm_zero:
__lowercase = AdaLayerNormZero(_A ,_A )
else:
__lowercase = nn.LayerNorm(_A ,elementwise_affine=_A )
__lowercase = Attention(
query_dim=_A ,heads=_A ,dim_head=_A ,dropout=_A ,bias=_A ,cross_attention_dim=cross_attention_dim if only_cross_attention else None ,upcast_attention=_A ,)
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerNormZero would not make sense if returned during
# the second cross attention block.
__lowercase = (
AdaLayerNorm(_A ,_A )
if self.use_ada_layer_norm
else nn.LayerNorm(_A ,elementwise_affine=_A )
)
__lowercase = Attention(
query_dim=_A ,cross_attention_dim=cross_attention_dim if not double_self_attention else None ,heads=_A ,dim_head=_A ,dropout=_A ,bias=_A ,upcast_attention=_A ,) # is self-attn if encoder_hidden_states is none
else:
__lowercase = None
__lowercase = None
# 3. Feed-forward
__lowercase = nn.LayerNorm(_A ,elementwise_affine=_A )
__lowercase = FeedForward(_A ,dropout=_A ,activation_fn=_A ,final_dropout=_A )
# let chunk size default to None
__lowercase = None
__lowercase = 0
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : Optional[int] ,lowercase__ : Tuple ):
# Sets chunk feed-forward
__lowercase = chunk_size
__lowercase = dim
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Optional[Any] ,lowercase__ : Union[str, Any] = None ,lowercase__ : Any = None ,lowercase__ : Optional[int] = None ,lowercase__ : Any = None ,lowercase__ : Union[str, Any] = None ,lowercase__ : Tuple = None ,):
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
__lowercase = self.norma(_A ,_A )
elif self.use_ada_layer_norm_zero:
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase = self.norma(
_A ,_A ,_A ,hidden_dtype=hidden_states.dtype )
else:
__lowercase = self.norma(_A )
__lowercase = cross_attention_kwargs if cross_attention_kwargs is not None else {}
__lowercase = self.attna(
_A ,encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None ,attention_mask=_A ,**_A ,)
if self.use_ada_layer_norm_zero:
__lowercase = gate_msa.unsqueeze(1 ) * attn_output
__lowercase = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
__lowercase = (
self.norma(_A ,_A ) if self.use_ada_layer_norm else self.norma(_A )
)
__lowercase = self.attna(
_A ,encoder_hidden_states=_A ,attention_mask=_A ,**_A ,)
__lowercase = attn_output + hidden_states
# 3. Feed-forward
__lowercase = self.norma(_A )
if self.use_ada_layer_norm_zero:
__lowercase = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
F"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`." )
__lowercase = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
__lowercase = torch.cat(
[self.ff(_A ) for hid_slice in norm_hidden_states.chunk(_A ,dim=self._chunk_dim )] ,dim=self._chunk_dim ,)
else:
__lowercase = self.ff(_A )
if self.use_ada_layer_norm_zero:
__lowercase = gate_mlp.unsqueeze(1 ) * ff_output
__lowercase = ff_output + hidden_states
return hidden_states
class lowercase_ (nn.Module ):
"""simple docstring"""
def __init__( self : Any ,lowercase__ : int ,lowercase__ : Optional[int] = None ,lowercase__ : Optional[int] = 4 ,lowercase__ : Optional[Any] = 0.0 ,lowercase__ : Dict = "geglu" ,lowercase__ : Dict = False ,):
super().__init__()
__lowercase = int(dim * mult )
__lowercase = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
__lowercase = GELU(_A ,_A )
if activation_fn == "gelu-approximate":
__lowercase = GELU(_A ,_A ,approximate='''tanh''' )
elif activation_fn == "geglu":
__lowercase = GEGLU(_A ,_A )
elif activation_fn == "geglu-approximate":
__lowercase = ApproximateGELU(_A ,_A )
__lowercase = nn.ModuleList([] )
# project in
self.net.append(_A )
# project dropout
self.net.append(nn.Dropout(_A ) )
# project out
self.net.append(nn.Linear(_A ,_A ) )
# The FF used in Vision Transformer, MLP-Mixer, etc. has a final dropout
if final_dropout:
self.net.append(nn.Dropout(_A ) )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : List[str] ):
for module in self.net:
__lowercase = module(_A )
return hidden_states
class lowercase_ (nn.Module ):
"""simple docstring"""
def __init__( self : Any ,lowercase__ : Dict ,lowercase__ : List[Any] ,lowercase__ : List[Any] = "none" ):
super().__init__()
__lowercase = nn.Linear(_A ,_A )
__lowercase = approximate
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Any ):
if gate.device.type != "mps":
return F.gelu(_A ,approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ,approximate=self.approximate ).to(dtype=gate.dtype )
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : List[str] ):
__lowercase = self.proj(_A )
__lowercase = self.gelu(_A )
return hidden_states
class lowercase_ (nn.Module ):
"""simple docstring"""
def __init__( self : Dict ,lowercase__ : Optional[Any] ,lowercase__ : List[str] ):
super().__init__()
__lowercase = nn.Linear(_A ,dim_out * 2 )
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Union[str, Any] ):
if gate.device.type != "mps":
return F.gelu(_A )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : int ):
__lowercase , __lowercase = self.proj(_A ).chunk(2 ,dim=-1 )
return hidden_states * self.gelu(_A )
class lowercase_ (nn.Module ):
"""simple docstring"""
def __init__( self : Tuple ,lowercase__ : Optional[int] ,lowercase__ : Optional[int] ):
super().__init__()
__lowercase = nn.Linear(_A ,_A )
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Any ):
__lowercase = self.proj(_A )
return x * torch.sigmoid(1.7_0_2 * x )
class lowercase_ (nn.Module ):
"""simple docstring"""
def __init__( self : int ,lowercase__ : str ,lowercase__ : Union[str, Any] ):
super().__init__()
__lowercase = nn.Embedding(_A ,_A )
__lowercase = nn.SiLU()
__lowercase = nn.Linear(_A ,embedding_dim * 2 )
__lowercase = nn.LayerNorm(_A ,elementwise_affine=_A )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : List[str] ,lowercase__ : Any ):
__lowercase = self.linear(self.silu(self.emb(_A ) ) )
__lowercase , __lowercase = torch.chunk(_A ,2 )
__lowercase = self.norm(_A ) * (1 + scale) + shift
return x
class lowercase_ (nn.Module ):
"""simple docstring"""
def __init__( self : Optional[int] ,lowercase__ : Union[str, Any] ,lowercase__ : str ):
super().__init__()
__lowercase = CombinedTimestepLabelEmbeddings(_A ,_A )
__lowercase = nn.SiLU()
__lowercase = nn.Linear(_A ,6 * embedding_dim ,bias=_A )
__lowercase = nn.LayerNorm(_A ,elementwise_affine=_A ,eps=1e-6 )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : List[Any] ,lowercase__ : List[str] ,lowercase__ : Any ,lowercase__ : str=None ):
__lowercase = self.linear(self.silu(self.emb(_A ,_A ,hidden_dtype=_A ) ) )
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = emb.chunk(6 ,dim=1 )
__lowercase = self.norm(_A ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class lowercase_ (nn.Module ):
"""simple docstring"""
def __init__( self : Optional[int] ,lowercase__ : Union[str, Any] ,lowercase__ : Any ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[int] = None ,lowercase__ : Dict = 1e-5 ):
super().__init__()
__lowercase = num_groups
__lowercase = eps
if act_fn is None:
__lowercase = None
else:
__lowercase = get_activation(_A )
__lowercase = nn.Linear(_A ,out_dim * 2 )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : Dict ,lowercase__ : Tuple ):
if self.act:
__lowercase = self.act(_A )
__lowercase = self.linear(_A )
__lowercase = emb[:, :, None, None]
__lowercase , __lowercase = emb.chunk(2 ,dim=1 )
__lowercase = F.group_norm(_A ,self.num_groups ,eps=self.eps )
__lowercase = x * (1 + scale) + shift
return x
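# Minimal sketch of the chunked feed-forward trick used in the transformer
# block above (assumes torch is installed): splitting the sequence
# dimension, applying the FF per chunk, and concatenating the results
# matches a single full pass.
import torch

ff = torch.nn.Linear(8, 8)
x = torch.randn(2, 6, 8)  # (batch, seq, dim); seq divisible by num chunks
chunked = torch.cat([ff(c) for c in x.chunk(3, dim=1)], dim=1)
assert torch.allclose(ff(x), chunked, atol=1e-6)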
| 104
|
from cva import destroyAllWindows, imread, imshow, waitKey
def A__ ( __lowerCamelCase ):
# get the image height and width
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(__lowerCamelCase ):
for j in range(__lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = [2_55, 2_55, 2_55] - img[i][j]
return img
if __name__ == "__main__":
# read original image
__UpperCAmelCase = imread("image_data/lena.jpg", 1)
# convert to its negative
__UpperCAmelCase = convert_to_negative(img)
# show result image
imshow("negative of original image", img)
waitKey(0)
destroyAllWindows()
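# Per-pixel check of the transform above: each channel value v becomes
# 255 - v, so a bright pixel maps to a dark one.
import numpy as np

assert ([255, 255, 255] - np.array([10, 200, 255]) == np.array([245, 55, 0])).all()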
| 299
| 0
|
'''simple docstring'''
import baseaa
def _A ( _lowerCAmelCase ):
"""simple docstring"""
return baseaa.baaencode(string.encode('utf-8' ) )
def _A ( _lowerCAmelCase ):
"""simple docstring"""
return baseaa.baadecode(__lowerCamelCase ).decode('utf-8' )
if __name__ == "__main__":
lowerCamelCase = """Hello World!"""
lowerCamelCase = baseaa_encode(test)
print(encoded)
lowerCamelCase = baseaa_decode(encoded)
print(decoded)
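# The helpers above appear to be the digit-stripped form of
# base64.b64encode / base64.b64decode; the intended round trip is:
import base64

assert base64.b64decode(base64.b64encode(b"Hello World!")) == b"Hello World!"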
| 166
|
import math
def A__ ( __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(exponent )
def A__ ( __lowerCamelCase = 1 / 1_23_45 ):
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 3
while True:
SCREAMING_SNAKE_CASE_ = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(__lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = int(__lowerCamelCase )
total_partitions += 1
if check_partition_perfect(__lowerCamelCase ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(__lowerCamelCase )
integer += 1
if __name__ == "__main__":
print(F"""{solution() = }""")
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
def infix_2_postfix(infix: str) -> str:
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # print table header for output
    print(
        "Symbol".center(8), "Stack".center(print_width), "Postfix".center(print_width), sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is alphabet / digit, add it to postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # pop stack & add the content to postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # if stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8), ("".join(stack)).ljust(print_width), ("".join(post_fix)).ljust(print_width), sep=" | ",
        )  # output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to postfix
        print(
            " ".center(8), ("".join(stack)).ljust(print_width), ("".join(post_fix)).ljust(print_width), sep=" | ",
        )  # output in tabular format

    return "".join(post_fix)  # return postfix as str


def infix_2_prefix(infix: str) -> str:
    infix = list(infix[::-1])  # reverse the infix equation
    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("
    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on the reversed infix, return reverse of postfix


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # input an infix equation
    Infix = "".join(Infix.split())  # remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
    "b0": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.0,
        "image_size": 224,
        "dropout_rate": 0.2,
        "dw_padding": [],
    },
    "b1": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.1,
        "image_size": 240,
        "dropout_rate": 0.2,
        "dw_padding": [16],
    },
    "b2": {
        "hidden_dim": 1408,
        "width_coef": 1.1,
        "depth_coef": 1.2,
        "image_size": 260,
        "dropout_rate": 0.3,
        "dw_padding": [5, 8, 16],
    },
    "b3": {
        "hidden_dim": 1536,
        "width_coef": 1.2,
        "depth_coef": 1.4,
        "image_size": 300,
        "dropout_rate": 0.3,
        "dw_padding": [5, 18],
    },
    "b4": {
        "hidden_dim": 1792,
        "width_coef": 1.4,
        "depth_coef": 1.8,
        "image_size": 380,
        "dropout_rate": 0.4,
        "dw_padding": [6],
    },
    "b5": {
        "hidden_dim": 2048,
        "width_coef": 1.6,
        "depth_coef": 2.2,
        "image_size": 456,
        "dropout_rate": 0.4,
        "dw_padding": [13, 27],
    },
    "b6": {
        "hidden_dim": 2304,
        "width_coef": 1.8,
        "depth_coef": 2.6,
        "image_size": 528,
        "dropout_rate": 0.5,
        "dw_padding": [31],
    },
    "b7": {
        "hidden_dim": 2560,
        "width_coef": 2.0,
        "depth_coef": 3.1,
        "image_size": 600,
        "dropout_rate": 0.5,
        "dw_padding": [18],
    },
}
def get_efficientnet_config(model_name: str) -> EfficientNetConfig:
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size}, image_mean=[0.485, 0.456, 0.406], image_std=[0.47853944, 0.4732864, 0.47434163], do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F'block{b}_expand_conv/kernel:0', F'encoder.blocks.{hf_b}.expansion.expand_conv.weight') )
rename_keys.append((F'block{b}_expand_bn/gamma:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.weight') )
rename_keys.append((F'block{b}_expand_bn/beta:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.bias') )
rename_keys.append(
(F'block{b}_expand_bn/moving_mean:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.running_mean') )
rename_keys.append(
(F'block{b}_expand_bn/moving_variance:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.running_var') )
rename_keys.append(
(F'block{b}_dwconv/depthwise_kernel:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight') )
rename_keys.append((F'block{b}_bn/gamma:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight') )
rename_keys.append((F'block{b}_bn/beta:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias') )
rename_keys.append(
(F'block{b}_bn/moving_mean:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean') )
rename_keys.append(
(F'block{b}_bn/moving_variance:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var') )
rename_keys.append((F'block{b}_se_reduce/kernel:0', F'encoder.blocks.{hf_b}.squeeze_excite.reduce.weight') )
rename_keys.append((F'block{b}_se_reduce/bias:0', F'encoder.blocks.{hf_b}.squeeze_excite.reduce.bias') )
rename_keys.append((F'block{b}_se_expand/kernel:0', F'encoder.blocks.{hf_b}.squeeze_excite.expand.weight') )
rename_keys.append((F'block{b}_se_expand/bias:0', F'encoder.blocks.{hf_b}.squeeze_excite.expand.bias') )
rename_keys.append(
(F'block{b}_project_conv/kernel:0', F'encoder.blocks.{hf_b}.projection.project_conv.weight') )
rename_keys.append((F'block{b}_project_bn/gamma:0', F'encoder.blocks.{hf_b}.projection.project_bn.weight') )
rename_keys.append((F'block{b}_project_bn/beta:0', F'encoder.blocks.{hf_b}.projection.project_bn.bias') )
rename_keys.append(
(F'block{b}_project_bn/moving_mean:0', F'encoder.blocks.{hf_b}.projection.project_bn.running_mean') )
rename_keys.append(
(F'block{b}_project_bn/moving_variance:0', F'encoder.blocks.{hf_b}.projection.project_bn.running_var') )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    original_model = model_classes[model_name](
        include_top=True, weights="imagenet", input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
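
# Example invocation (illustrative; the script filename and output folder are
# assumptions, not values from the repository):
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path hf_model --save_model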
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class ClapFeatureExtractor(SequenceFeatureExtractor):
    r"""Constructs a CLAP feature extractor, turning raw audio into (fused) log-mel spectrograms."""

    model_input_names = ["input_features", "is_longer"]

    def __init__(self, feature_size=64, sampling_rate=48000, hop_length=480, max_length_s=10, fft_window_size=1024, padding_value=0.0, return_attention_mask=False, frequency_min: float = 0, frequency_max: float = 14000, top_db: Optional[int] = None, truncation: str = "fusion", padding: str = "repeatpad", **kwargs):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm=None, mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney",
        )
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output
    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform, window_function(self.fft_window_size, "hann"), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=mel_filters, log_mel="dB",
        )
        return log_mel_spectrogram.T
    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer
    def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], truncation: str = None, padding: Optional[str] = None, max_length: Optional[int] = None, sampling_rate: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
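
# Usage sketch (an illustrative addition, not part of the module): with the
# default settings, exactly 10 s of 48 kHz audio produces fused log-mel
# features of shape (batch, 4, num_frames, 64).
#
#   import numpy as np
#   fe = ClapFeatureExtractor()
#   audio = np.zeros(48000 * 10, dtype=np.float32)
#   feats = fe(audio, sampling_rate=48000, return_tensors="np")
#   feats["input_features"].shape  # -> (1, 4, num_frames, 64)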
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of a float, or its derivative if deriv=True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Return the value found after the forward propagation training."""
    # random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
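
# Added illustration: with deriv=True the helper returns the sigmoid
# derivative at an already-activated value a, i.e. a * (1 - a); for example
# sigmoid_function(0.5, deriv=True) == 0.25.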
import unittest

from transformers import load_tool
from transformers.utils import is_torch_available


if is_torch_available():
    import torch

from transformers.testing_utils import require_torch

from .test_tools_common import ToolTesterMixin


@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setup(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
from __future__ import annotations


def average(nums: list[float]) -> float:
    """Return the arithmetic mean of a non-empty list of numbers."""
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
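
# Added example: average([1, 2, 3, 4]) returns 2.5; calling it with an empty
# list raises ValueError("List is empty").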
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(self, max_length: int, vocab_size: int, d_model: int, dropout_rate: float, num_layers: int, num_heads: int, d_kv: int, d_ff: int, feed_forward_proj: str, is_decoder: bool = False):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size, d_model=d_model, num_heads=num_heads, d_kv=d_kv, d_ff=d_ff, dropout_rate=dropout_rate, feed_forward_proj=feed_forward_proj, is_decoder=is_decoder, is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
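
# Instantiation sketch (an illustrative addition; the hyperparameters below
# are assumptions, not values from a released checkpoint):
#
#   encoder = SpectrogramNotesEncoder(
#       max_length=2048, vocab_size=1536, d_model=768, dropout_rate=0.1,
#       num_layers=12, num_heads=12, d_kv=64, d_ff=2048,
#       feed_forward_proj="gated-gelu", is_decoder=False,
#   )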
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
__SCREAMING_SNAKE_CASE : Any = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
__SCREAMING_SNAKE_CASE : Any = 128_022
__SCREAMING_SNAKE_CASE : Optional[Any] = 128_028
@require_sentencepiece
class M2M100TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = M2M100Tokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return M2M100Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))

    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def test_pretrained_model_lists(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [2, 3, 4, 5, 6],
        )

        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
snake_case_ = {"""input_ids""": [[128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2], [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        # `snake_case_` is the expected encoding defined on the line above.
        self.tokenizer_integration_test_util(
            expected_encoding=snake_case_,
            model_name="facebook/m2m100_418M",
            revision="c168bae485c864188cf9aa0e4108b0b6934dc91e",
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class M2M100TokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: M2M100Tokenizer = M2M100Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128063)

    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)

    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = M2M100Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"

        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")

        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )

        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]

    @require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    @require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[128022, 58, 4183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128006,
            },
        )
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer


class Wav2Vec2Processor(ProcessorMixin):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )

            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)

            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
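
# Usage sketch (an illustrative addition, not part of the module): a single
# processor call handles both audio features and text labels.
#
#   processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#   inputs = processor(audio=raw_audio, sampling_rate=16000, return_tensors="pt")
#   labels = processor(text="HELLO WORLD", return_tensors="pt").input_ids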
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict

import requests
from slack_sdk import WebClient


client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])


def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent


def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title

        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

        # Set by `post()`; required before `post_reply()` can be used.
        self.thread_ts = None

    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def no_failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
    @property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in self.doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }

    @property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)
    @staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )

    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}


def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact


def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts: Dict[str, Artifact] = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
__UpperCAmelCase = get_job_links()
__UpperCAmelCase = retrieve_available_artifacts()
__UpperCAmelCase = collections.OrderedDict(
[
("*.py", "API Examples"),
("*.md", "MD Examples"),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
__UpperCAmelCase = {
v: {
"failed": [],
"failures": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
__UpperCAmelCase = github_actions_job_links.get("run_doctests")
__UpperCAmelCase = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
__UpperCAmelCase = retrieve_artifact(artifact_path["name"])
if "stats" in artifact:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = handle_test_results(artifact["stats"])
__UpperCAmelCase = failed
__UpperCAmelCase = success
__UpperCAmelCase = time_spent[1:-1] + ", "
__UpperCAmelCase = extract_first_line_failure(artifact["failures_short"])
for line in artifact["summary_short"].split("\n"):
if re.search("FAILED", line):
__UpperCAmelCase = line.replace("FAILED ", "")
__UpperCAmelCase = line.split()[0].replace("\n", "")
if "::" in line:
__UpperCAmelCase , __UpperCAmelCase = line.split("::")
else:
__UpperCAmelCase , __UpperCAmelCase = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
__UpperCAmelCase = docs[file_regex]
doc_test_results[category]["failed"].append(test)
__UpperCAmelCase = all_failures[test] if test in all_failures else "N/A"
__UpperCAmelCase = failure
break
__UpperCAmelCase = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
| 232
|
from __future__ import annotations
__UpperCAmelCase = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(grid, init, goal, cost, heuristic):
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when the search is complete
    resign = False  # flag set if we cannot expand any further

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # choose the least costly action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # try out the different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid) and ya >= 0 and ya < len(grid[0]):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya])
                            closed[xa][ya] = 1
                            action[xa][ya] = i

    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
__UpperCAmelCase = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
__UpperCAmelCase = [0, 0]
# all coordinates are given in format [y,x]
__UpperCAmelCase = [len(grid) - 1, len(grid[0]) - 1]
__UpperCAmelCase = 1
# the cost map which pushes the path closer to the goal
__UpperCAmelCase = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
__UpperCAmelCase = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
__UpperCAmelCase = 99
__UpperCAmelCase , __UpperCAmelCase = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
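
    # Hand-checkable illustration (added): on an obstacle-free 2x2 grid with a
    # Manhattan heuristic, the search hugs one edge of the grid.
    tiny_grid = [[0, 0], [0, 0]]
    tiny_heuristic = [[2, 1], [1, 0]]  # Manhattan distance to the goal cell
    tiny_path, _ = search(tiny_grid, [0, 0], [1, 1], 1, tiny_heuristic)
    print(tiny_path)  # [[0, 0], [0, 1], [1, 1]]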
| 299
| 0
|
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 124
|
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve the linear system via Gaussian elimination with partial pivoting."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    # copy the matrix and the vector into one augmented matrix
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(data_points: list[int]) -> Callable[[int], int]:
    """Return the polynomial of minimal degree passing through the points
    (1, data_points[0]), (2, data_points[1]), ..."""
    size: int = len(data_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]

    for x_val, y_val in enumerate(data_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    """The generating function u(n) from Project Euler problem 101."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms of the successive optimum polynomials."""
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0

    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)

    return ret


if __name__ == "__main__":
    print(f"{solution() = }")
| 299
| 0
|
'''simple docstring'''
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Brute force: check every 3-permutation of the array."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Two-pointer scan over the sorted array."""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code_1 = """
triplet_sum1(*dataset)
"""
    test_code_2 = """
triplet_sum2(*dataset)
"""
    times_1 = repeat(setup=setup_code, stmt=test_code_1, repeat=5, number=10000)
    times_2 = repeat(setup=setup_code, stmt=test_code_2, repeat=5, number=10000)
    return (min(times_1), min(times_2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
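
    # Deterministic spot-check (added for illustration): both strategies must
    # agree on a fixed instance; (5, 7, 23) is the sorted answer here.
    assert triplet_sum1([13, 29, 7, 23, 5], 35) == (5, 7, 23)
    assert triplet_sum2([13, 29, 7, 23, 5], 35) == (5, 7, 23)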
| 104
|
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
__UpperCAmelCase = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    """simple docstring"""

    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"
    def __init__(self, hparams, **kwargs):
if hparams.sortish_sampler and hparams.gpus > 1:
SCREAMING_SNAKE_CASE_ = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('''Dynamic Batch size does not work for multi-gpu training''' )
if hparams.sortish_sampler:
raise ValueError('''--sortish_sampler and --max_tokens_per_batch may not be used simultaneously''' )
        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
use_task_specific_params(self.model , '''summarization''' )
save_git_info(self.hparams.output_dir )
SCREAMING_SNAKE_CASE_ = Path(self.output_dir ) / '''metrics.json'''
SCREAMING_SNAKE_CASE_ = Path(self.output_dir ) / '''hparams.pkl'''
pickle_save(self.hparams , self.hparams_save_path )
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = defaultdict(_A )
SCREAMING_SNAKE_CASE_ = self.config.model_type
SCREAMING_SNAKE_CASE_ = self.config.tgt_vocab_size if self.model_type == '''fsmt''' else self.config.vocab_size
SCREAMING_SNAKE_CASE_ = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
SCREAMING_SNAKE_CASE_ = {
'''train''': self.hparams.n_train,
'''val''': self.hparams.n_val,
'''test''': self.hparams.n_test,
}
SCREAMING_SNAKE_CASE_ = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
SCREAMING_SNAKE_CASE_ = {
'''train''': self.hparams.max_target_length,
'''val''': self.hparams.val_max_target_length,
'''test''': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F'''target_lens: {self.target_lens}'''
assert self.target_lens["train"] <= self.target_lens["test"], F'''target_lens: {self.target_lens}'''
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
SCREAMING_SNAKE_CASE_ = get_git_info()['''repo_sha''']
SCREAMING_SNAKE_CASE_ = hparams.num_workers
SCREAMING_SNAKE_CASE_ = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , _A ):
SCREAMING_SNAKE_CASE_ = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
SCREAMING_SNAKE_CASE_ = self.decoder_start_token_id
SCREAMING_SNAKE_CASE_ = (
SeqaSeqDataset if hasattr(self.tokenizer , '''prepare_seq2seq_batch''' ) else LegacySeqaSeqDataset
)
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
SCREAMING_SNAKE_CASE_ = self.hparams.eval_max_gen_length
else:
SCREAMING_SNAKE_CASE_ = self.model.config.max_length
SCREAMING_SNAKE_CASE_ = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch(self, batch: dict) -> Dict[str, List[str]]:
SCREAMING_SNAKE_CASE_ = {
k: self.tokenizer.batch_decode(v.tolist() ) if '''mask''' not in k else v.shape for k, v in batch.items()
}
save_json(_A , Path(self.output_dir ) / '''text_batch.json''' )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / '''tok_batch.json''' )
SCREAMING_SNAKE_CASE_ = True
return readable_batch
    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]):
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)
    def _step(self, batch: dict) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.tokenizer.pad_token_id
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = batch['''input_ids'''], batch['''attention_mask''']
SCREAMING_SNAKE_CASE_ = batch['''labels''']
if isinstance(self.model , _A ):
SCREAMING_SNAKE_CASE_ = self.model._shift_right(_A )
else:
SCREAMING_SNAKE_CASE_ = shift_tokens_right(_A , _A )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
SCREAMING_SNAKE_CASE_ = decoder_input_ids
self.save_readable_batch(_A )
SCREAMING_SNAKE_CASE_ = self(_A , attention_mask=_A , decoder_input_ids=_A , use_cache=_A )
SCREAMING_SNAKE_CASE_ = outputs['''logits''']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
SCREAMING_SNAKE_CASE_ = nn.CrossEntropyLoss(ignore_index=_A )
assert lm_logits.shape[-1] == self.vocab_size
SCREAMING_SNAKE_CASE_ = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
SCREAMING_SNAKE_CASE_ = nn.functional.log_softmax(_A , dim=-1 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = label_smoothed_nll_loss(
_A , _A , self.hparams.label_smoothing , ignore_index=_A )
return (loss,)
    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id

    def training_step(self, batch, batch_idx) -> Dict:
SCREAMING_SNAKE_CASE_ = self._step(_A )
SCREAMING_SNAKE_CASE_ = dict(zip(self.loss_names , _A ) )
# tokens per batch
SCREAMING_SNAKE_CASE_ = batch['''input_ids'''].ne(self.pad ).sum() + batch['''labels'''].ne(self.pad ).sum()
SCREAMING_SNAKE_CASE_ = batch['''input_ids'''].shape[0]
SCREAMING_SNAKE_CASE_ = batch['''input_ids'''].eq(self.pad ).sum()
SCREAMING_SNAKE_CASE_ = batch['''input_ids'''].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)

    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
self.step_count += 1
SCREAMING_SNAKE_CASE_ = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
SCREAMING_SNAKE_CASE_ = losses['''loss''']
SCREAMING_SNAKE_CASE_ = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['''gen_time''', '''gen_len''']
}
SCREAMING_SNAKE_CASE_ = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
SCREAMING_SNAKE_CASE_ = torch.tensor(_A ).type_as(_A )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(_A )
SCREAMING_SNAKE_CASE_ = {F'''{prefix}_avg_{k}''': x for k, x in losses.items()}
SCREAMING_SNAKE_CASE_ = self.step_count
self.metrics[prefix].append(_A ) # callback writes this to self.metrics_save_path
SCREAMING_SNAKE_CASE_ = flatten_list([x['''preds'''] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F'''{prefix}_loss''': loss,
F'''{prefix}_{self.val_metric}''': metric_tensor,
}
    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)

    def _generative_step(self, batch: dict) -> dict:
        ta = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
SCREAMING_SNAKE_CASE_ = self.model.generate(
batch['''input_ids'''] , attention_mask=batch['''attention_mask'''] , use_cache=_A , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
SCREAMING_SNAKE_CASE_ = (time.time() - ta) / batch['''input_ids'''].shape[0]
SCREAMING_SNAKE_CASE_ = self.ids_to_clean_text(_A )
SCREAMING_SNAKE_CASE_ = self.ids_to_clean_text(batch['''labels'''] )
SCREAMING_SNAKE_CASE_ = self._step(_A )
SCREAMING_SNAKE_CASE_ = dict(zip(self.loss_names , _A ) )
SCREAMING_SNAKE_CASE_ = self.calc_generative_metrics(_A , _A )
SCREAMING_SNAKE_CASE_ = np.mean(lmap(_A , _A ) )
base_metrics.update(gen_time=_A , gen_len=_A , preds=_A , target=_A , **_A )
return base_metrics
    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")

    def get_dataset(self, type_path) -> SeqaSeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer,
            type_path=type_path,
            n_obs=n_obs,
            max_target_length=max_target_length,
            **self.dataset_kwargs,
        )
        return dataset
    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=False,
                num_workers=self.num_workers,
                sampler=sampler,
            )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset,
                batch_sampler=batch_sampler,
                collate_fn=dataset.collate_fn,
                num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=shuffle,
                num_workers=self.num_workers,
                sampler=None,
            )
    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
parser.add_argument(
'''--max_source_length''' , default=1024 , type=_A , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--max_target_length''' , default=56 , type=_A , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--val_max_target_length''' , default=142 , type=_A , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--test_max_target_length''' , default=142 , type=_A , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument('''--freeze_encoder''' , action='''store_true''' )
parser.add_argument('''--freeze_embeds''' , action='''store_true''' )
parser.add_argument('''--sortish_sampler''' , action='''store_true''' , default=_A )
parser.add_argument('''--overwrite_output_dir''' , action='''store_true''' , default=_A )
parser.add_argument('''--max_tokens_per_batch''' , type=_A , default=_A )
parser.add_argument('''--logger_name''' , type=_A , choices=['''default''', '''wandb''', '''wandb_shared'''] , default='''default''' )
parser.add_argument('''--n_train''' , type=_A , default=-1 , required=_A , help='''# examples. -1 means use all.''' )
parser.add_argument('''--n_val''' , type=_A , default=500 , required=_A , help='''# examples. -1 means use all.''' )
parser.add_argument('''--n_test''' , type=_A , default=-1 , required=_A , help='''# examples. -1 means use all.''' )
parser.add_argument(
'''--task''' , type=_A , default='''summarization''' , required=_A , help='''# examples. -1 means use all.''' )
parser.add_argument('''--label_smoothing''' , type=_A , default=0.0 , required=_A )
parser.add_argument('''--src_lang''' , type=_A , default='''''' , required=_A )
parser.add_argument('''--tgt_lang''' , type=_A , default='''''' , required=_A )
parser.add_argument('''--eval_beams''' , type=_A , default=_A , required=_A )
parser.add_argument(
'''--val_metric''' , type=_A , default=_A , required=_A , choices=['''bleu''', '''rouge2''', '''loss''', None] )
parser.add_argument('''--eval_max_gen_length''' , type=_A , default=_A , help='''never generate more than n tokens''' )
parser.add_argument('''--save_top_k''' , type=_A , default=1 , required=_A , help='''How many checkpoints to save''' )
parser.add_argument(
'''--early_stopping_patience''' , type=_A , default=-1 , required=_A , help=(
'''-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'''
''' val_check_interval will effect it.'''
) , )
return parser
class TranslationModule(SummarizationModule):
    """simple docstring"""

    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"
    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None):
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args)
        else:
            model = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer = generic_train(
        model,
        args,
        logging_callback=SeqaSeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]

    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    main(args)
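
# Example invocation (added; illustrative only -- the data path, model name and
# exact flag set are assumptions, since `--model_name_or_path` comes from the
# generic args added by BaseTransformer and is not defined in this file):
#   python finetune.py --data_dir ./cnn_dm --output_dir ./outputs \
#       --model_name_or_path t5-small --do_train --do_predict --n_val 500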
| 299
| 0
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """Implements a batch, differentiable, standard pinhole camera."""

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """Creates a new camera for the resized view, assuming the aspect ratio does not change."""
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
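

# Shape sanity check (added for illustration): create_pan_cameras yields one
# batch of 20 poses on a circle; with size=8 each pose contributes 8*8 rays of
# the form (origin, direction), flattened into a single ray dimension.
if __name__ == "__main__":
    cameras = create_pan_cameras(size=8)
    print(cameras.camera_rays.shape)  # torch.Size([1, 1280, 2, 3])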
| 166
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
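
# Note (added): with the lazy module above, `from transformers.models.layoutlmv2
# import LayoutLMv2Model` resolves the heavy torch-backed import only on first
# attribute access, so a bare `import transformers` stays cheap.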
| 299
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve the linear system via Gaussian elimination with partial pivoting."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]

    # copy the matrix and the vector into one augmented matrix
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(data_points: list[int]) -> Callable[[int], int]:
    """Return the polynomial of minimal degree passing through the given points."""
    size: int = len(data_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]

    for x_val, y_val in enumerate(data_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    """The generating function u(n) from Project Euler problem 101."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms of the successive optimum polynomials."""
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0

    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)

    return ret


if __name__ == "__main__":
    print(f"{solution() = }")
| 309
|
import functools
def mincost_tickets(days, costs):
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
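
    # Worked instance (added; the classic formulation of this problem): with day,
    # week and month passes costing 2, 7 and 15, a weekly pass covering days 4-8
    # plus single-day tickets for days 1 and 20 is optimal.
    print(mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # 11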
| 299
| 0
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
def a_ ( self : str ) -> Optional[Any]:
"""simple docstring"""
A__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
A__ = self.get_dummy_components()
A__ = TextToVideoSDPipeline(**_A )
A__ = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
A__ = self.get_dummy_inputs(_A )
A__ = """np"""
A__ = sd_pipe(**_A ).frames
A__ = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
A__ = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def a_ ( self : Any ) -> Optional[int]:
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_A , expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def a_ ( self : Tuple ) -> str:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_A , expected_max_diff=1e-2 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def a_ ( self : Any ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def a_ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def a_ ( self : Tuple ) -> Any:
"""simple docstring"""
pass
    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
'''simple docstring'''
def a_ ( self : int ) -> int:
"""simple docstring"""
A__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
A__ = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
A__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
A__ = pipe.to("""cuda""" )
A__ = """Spiderman is surfing"""
A__ = torch.Generator(device="""cpu""" ).manual_seed(0 )
A__ = pipe(_A , generator=_A , num_inference_steps=25 , output_type="""pt""" ).frames
A__ = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def a_ ( self : Optional[int] ) -> int:
"""simple docstring"""
A__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
A__ = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
A__ = pipe.to("""cuda""" )
A__ = """Spiderman is surfing"""
A__ = torch.Generator(device="""cpu""" ).manual_seed(0 )
A__ = pipe(_A , generator=_A , num_inference_steps=2 , output_type="""pt""" ).frames
A__ = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
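
# Note (added): the @slow cases above download real checkpoints and need a GPU;
# in the diffusers test suite they are skipped unless the RUN_SLOW=1 environment
# variable is set, so the fast test class is what runs in regular CI.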
| 274
|
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
__UpperCAmelCase = logging.get_logger(__name__)
enable_full_determinism()
class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    """simple docstring"""

    model_class = UNetaDModel
    main_input_name = "sample"
@property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}
@property
    def input_shape(self):
return (3, 32, 32)
@property
    def output_shape(self):
return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    """simple docstring"""

    model_class = UNetaDModel
    main_input_name = "sample"
@property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}
@property
    def input_shape(self):
return (4, 32, 32)
@property
    def output_shape(self):
return (4, 32, 32)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
def _UpperCamelCase ( self ) -> Tuple:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(_A )
SCREAMING_SNAKE_CASE_ = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
def _UpperCamelCase ( self ) -> Dict:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
model.to(_A )
SCREAMING_SNAKE_CASE_ = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
def _UpperCamelCase ( self ) -> Dict:
        # by default, model loading will use accelerate as `low_cpu_mem_usage=True`
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
model_accelerate.to(_A )
model_accelerate.eval()
SCREAMING_SNAKE_CASE_ = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
SCREAMING_SNAKE_CASE_ = noise.to(_A )
SCREAMING_SNAKE_CASE_ = torch.tensor([10] * noise.shape[0] ).to(_A )
SCREAMING_SNAKE_CASE_ = model_accelerate(_A , _A )['''sample''']
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained(
'''fusing/unet-ldm-dummy-update''' , output_loading_info=_A , low_cpu_mem_usage=_A )
model_normal_load.to(_A )
model_normal_load.eval()
SCREAMING_SNAKE_CASE_ = model_normal_load(_A , _A )['''sample''']
assert torch_all_close(_A , _A , rtol=1E-3 )
def _UpperCamelCase ( self ) -> List[str]:
SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' )
model.eval()
model.to(_A )
SCREAMING_SNAKE_CASE_ = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
SCREAMING_SNAKE_CASE_ = noise.to(_A )
SCREAMING_SNAKE_CASE_ = torch.tensor([10] * noise.shape[0] ).to(_A )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(_A , _A ).sample
SCREAMING_SNAKE_CASE_ = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
SCREAMING_SNAKE_CASE_ = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] )
# fmt: on
self.assertTrue(torch_all_close(_A , _A , rtol=1E-3 ) )
class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    """simple docstring"""

    model_class = UNetaDModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)

        return {"sample": noise, "timestep": time_step}
@property
    def input_shape(self):
return (3, 32, 32)
@property
    def output_shape(self):
return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common(self):
SCREAMING_SNAKE_CASE_ = {
'''block_out_channels''': [32, 64, 64, 64],
'''in_channels''': 3,
'''layers_per_block''': 1,
'''out_channels''': 3,
'''time_embedding_type''': '''fourier''',
'''norm_eps''': 1E-6,
'''mid_block_scale_factor''': math.sqrt(2.0 ),
'''norm_num_groups''': None,
'''down_block_types''': [
'''SkipDownBlock2D''',
'''AttnSkipDownBlock2D''',
'''SkipDownBlock2D''',
'''SkipDownBlock2D''',
],
'''up_block_types''': [
'''SkipUpBlock2D''',
'''SkipUpBlock2D''',
'''AttnSkipUpBlock2D''',
'''SkipUpBlock2D''',
],
}
SCREAMING_SNAKE_CASE_ = self.dummy_input
return init_dict, inputs_dict
@slow
def _UpperCamelCase ( self ) -> Tuple:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(_A )
SCREAMING_SNAKE_CASE_ = self.dummy_input
SCREAMING_SNAKE_CASE_ = floats_tensor((4, 3) + (256, 256) ).to(_A )
SCREAMING_SNAKE_CASE_ = noise
SCREAMING_SNAKE_CASE_ = model(**_A )
assert image is not None, "Make sure output is not None"
@slow
def _UpperCamelCase ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' )
model.to(_A )
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = (256, 256)
SCREAMING_SNAKE_CASE_ = torch.ones((batch_size, num_channels) + sizes ).to(_A )
SCREAMING_SNAKE_CASE_ = torch.tensor(batch_size * [1E-4] ).to(_A )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(_A , _A ).sample
SCREAMING_SNAKE_CASE_ = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
SCREAMING_SNAKE_CASE_ = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -1_0980.7129, -2_0028.8535, 8148.2822, 2342.2905, 567.7608] )
# fmt: on
self.assertTrue(torch_all_close(_A , _A , rtol=1E-2 ) )
def _UpperCamelCase ( self ) -> int:
SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' )
model.to(_A )
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = (32, 32)
SCREAMING_SNAKE_CASE_ = torch.ones((batch_size, num_channels) + sizes ).to(_A )
SCREAMING_SNAKE_CASE_ = torch.tensor(batch_size * [1E-4] ).to(_A )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(_A , _A ).sample
SCREAMING_SNAKE_CASE_ = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
SCREAMING_SNAKE_CASE_ = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] )
# fmt: on
self.assertTrue(torch_all_close(_A , _A , rtol=1E-2 ) )
def _UpperCamelCase ( self ) -> Dict:
# not required for this model
pass
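
# Note (added): the hard-coded output slices above act as numerical regression
# anchors; the rtol=1e-3 / 1e-2 tolerances in torch_all_close absorb small
# cross-device kernel differences without letting real regressions through.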
| 299
| 0
|
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected (height, width) after resizing, mirroring the processor's logic."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)
@property
    def image_processor_dict(self):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
"""simple docstring"""
snake_case : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , "image_mean" ) )
self.assertTrue(hasattr(_A , "image_std" ) )
self.assertTrue(hasattr(_A , "do_normalize" ) )
self.assertTrue(hasattr(_A , "do_resize" ) )
self.assertTrue(hasattr(_A , "do_rescale" ) )
self.assertTrue(hasattr(_A , "do_pad" ) )
self.assertTrue(hasattr(_A , "size" ) )
    def test_image_processor_from_dict_with_kwargs(self):
"""simple docstring"""
snake_case : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1_333} )
self.assertEqual(image_processor.do_pad , _A )
    def test_batch_feature(self):
"""simple docstring"""
pass
    def test_call_pil(self):
"""simple docstring"""
snake_case : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
snake_case : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case , snake_case : Dict = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case , snake_case : List[Any] = self.image_processor_tester.get_expected_values(_A , batched=_A )
snake_case : Tuple = image_processing(_A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_numpy(self):
"""simple docstring"""
snake_case : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
snake_case : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case , snake_case : Dict = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case : List[Any] = image_processing(_A , return_tensors="pt" ).pixel_values
snake_case , snake_case : Any = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_pytorch(self):
"""simple docstring"""
snake_case : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
snake_case : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case , snake_case : Optional[Any] = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case : int = image_processing(_A , return_tensors="pt" ).pixel_values
snake_case , snake_case : List[str] = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power (Project Euler 16)."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
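    # Hedged sanity check (added; not from the original source):
    # 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.
    assert solution(15) == 26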
"""Swap two nodes of a singly linked list by exchanging their data fields."""
from typing import Any


class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self) -> None:
        self.head = None

    def print_list(self) -> None:
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any) -> None:
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_a: Any, node_data_b: Any) -> None:
        if node_data_a == node_data_b:
            return
        node_a = self.head
        while node_a is not None and node_a.data != node_data_a:
            node_a = node_a.next
        node_b = self.head
        while node_b is not None and node_b.data != node_data_b:
            node_b = node_b.next
        if node_a is None or node_b is None:
            return
        node_a.data, node_b.data = node_b.data, node_a.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
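    # Hedged extra demo (added; not in the original): swap_nodes exchanges
    # payloads rather than re-linking nodes, so swapping a value with itself
    # or with a value that is absent is a harmless no-op.
    ll.swap_nodes(3, 3)
    ll.swap_nodes(2, 99)
    ll.print_list()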
from ..utils import DummyObject, requires_backends


# NOTE: the class name was lost in this dump; LMSDiscreteScheduler is assumed
# here, as it is the object diffusers guards behind the torch + scipy backends.
class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
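

# Hedged illustration (added; not part of the library): the dummy-object
# pattern above lets `from ... import *` succeed when optional backends are
# missing, deferring the ImportError to first use. A minimal standalone
# equivalent of the idea:
class _MissingBackendMeta(type):
    def __call__(cls, *args, **kwargs):
        raise ImportError(f"{cls.__name__} requires the backends {cls._backends}")


class _ExampleDummy(metaclass=_MissingBackendMeta):
    _backends = ["torch", "scipy"]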
from ...utils import (
    OptionalDependencyNotAvailable,
    is_note_seq_available,
    is_torch_available,
    is_transformers_available,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .notes_encoder import SpectrogramNotesEncoder
    from .continous_encoder import SpectrogramContEncoder
    from .pipeline_spectrogram_diffusion import (
        SpectrogramDiffusionPipeline,
        T5FilmDecoder,
    )

try:
    if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_transformers_and_torch_and_note_seq_objects import *  # noqa F403
else:
    from .midi_utils import MidiProcessor
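
# Hedged aside (added; not part of the module): availability of an optional
# backend can also be probed without importing it, e.g. via importlib:
#
#     import importlib.util
#     _has_note_seq = importlib.util.find_spec("note_seq") is not None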
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    # The three boolean flags below were obfuscated in the dump; these names are
    # assumed from the flags the Flax vision tests normally disable.
    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
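

# Hedged sketch (added; not part of the test suite): the JIT-parity test above
# compares compiled and eager outputs of the same callable. The pattern in
# miniature:
def _jit_parity_demo():
    import jax
    import jax.numpy as jnp

    def f(x):
        return x * 2.0 + 1.0

    x = jnp.arange(4.0)
    return bool(jnp.allclose(f(x), jax.jit(f)(x)))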
"""simple docstring"""
from math import ceil
def _a ( _SCREAMING_SNAKE_CASE = 1_001 ) -> Optional[int]:
snake_case_ = 1
for i in range(1 , int(ceil(n / 2.0 ) ) ):
snake_case_ = 2 * i + 1
snake_case_ = 2 * i
snake_case_ = total + 4 * odd**2 - 6 * even
return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
__SCREAMING_SNAKE_CASE : int = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number')
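

# Hedged worked example (added): a 5x5 spiral has diagonal values
# 1, 3, 5, 7, 9, 13, 17, 21, 25, which sum to 101.
assert solution(5) == 101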
from __future__ import annotations

from fractions import Fraction
from math import gcd, sqrt


def is_sq(number: int) -> bool:
    """Return True if `number` is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Return x_num/x_den + y_num/y_den + z_num/z_den as a reduced (top, bottom)."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set[tuple[int, int]] = set()
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f"{solution() = }")
import os
from typing import Dict, List, Union

import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs

from .tokenization_gpt2 import GPT2Tokenizer


# NOTE: the class name was obfuscated in this dump; it mirrors transformers'
# TFGPT2Tokenizer, an in-graph GPT-2 tokenizer built on keras-nlp.
class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
from typing import Callable, List, Optional, Tuple, Union

import torch
from transformers import CLIPTextModel, CLIPTokenizer

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings for classifier-free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation using VQ-Diffusion."""

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

                # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                seq_len = negative_prompt_embeds.shape[1]
                negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
                negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """
        Per latent pixel, keep only the most likely classes whose cumulative
        probability reaches `truncation_rate`; the rest are set to -inf (log(0)).
        """
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
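

# Hedged illustration (added; not part of the pipeline): `truncate` keeps, per
# position, the smallest prefix of sorted classes whose probability mass covers
# the truncation rate and zeroes the rest in log space. The same idea on a
# plain 1-D distribution:
def _truncate_demo():
    import torch

    log_p = torch.log(torch.tensor([0.5, 0.3, 0.15, 0.05]))
    sorted_lp, indices = torch.sort(log_p, descending=True)
    keep = torch.exp(sorted_lp).cumsum(0) < 0.9
    keep = torch.cat((torch.ones(1, dtype=torch.bool), keep[:-1]))  # always keep the argmax
    out = log_p.clone()
    out[indices[~keep]] = float("-inf")
    return out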
def count_divisors(n: int) -> int:
    """Count the divisors of n from its prime factorisation."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution() -> int:
    """Return the first triangular number with over 500 divisors (Project Euler 12)."""
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
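

# Hedged worked example (added): 28 = 2**2 * 7, so it has (2 + 1) * (1 + 1) = 6
# divisors (1, 2, 4, 7, 14, 28).
assert count_divisors(28) == 6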
def sum_of_divisors(input_num: int) -> int:
    """Return the sum of the proper divisors of a positive integer.

    The original function name was not recoverable from the dump;
    `sum_of_divisors` describes what the body computes.
    """
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
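

# Hedged worked example (added): the proper divisors of 6 are 1, 2 and 3,
# which sum back to 6, the smallest perfect number.
assert sum_of_divisors(6) == 6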
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}


class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
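

# Hedged usage sketch (added; not part of the module):
#
#     tokenizer = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
#     enc = tokenizer("hello world", return_tensors="pt")
#     # enc["input_ids"] starts with [CLS] and ends with [SEP]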
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def ta_base_tokenizer(self):
        # property name kept consistent with the call sites below
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
def _UpperCamelCase ( self , _A , _A=False , _A=20 , _A=5 ) -> Tuple[str, list]:
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for ByT5 because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
SCREAMING_SNAKE_CASE_ = []
for i in range(len(_A ) ):
try:
SCREAMING_SNAKE_CASE_ = tokenizer.decode([i] , clean_up_tokenization_spaces=_A )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
SCREAMING_SNAKE_CASE_ = list(filter(lambda _A : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , _A ) )
SCREAMING_SNAKE_CASE_ = list(filter(lambda _A : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_A ) , _A ) )
if max_length is not None and len(_A ) > max_length:
SCREAMING_SNAKE_CASE_ = toks[:max_length]
if min_length is not None and len(_A ) < min_length and len(_A ) > 0:
while len(_A ) < min_length:
SCREAMING_SNAKE_CASE_ = toks + toks
# toks_str = [t[1] for t in toks]
SCREAMING_SNAKE_CASE_ = [t[0] for t in toks]
# Ensure consistency
SCREAMING_SNAKE_CASE_ = tokenizer.decode(_A , clean_up_tokenization_spaces=_A )
if " " not in output_txt and len(_A ) > 1:
SCREAMING_SNAKE_CASE_ = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_A )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_A )
)
if with_prefix_space:
SCREAMING_SNAKE_CASE_ = ''' ''' + output_txt
SCREAMING_SNAKE_CASE_ = tokenizer.encode(_A , add_special_tokens=_A )
return output_txt, output_ids
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = tokenizer(['''hi</s>''', '''I went to the gym</s>''', '''</s>'''] )
SCREAMING_SNAKE_CASE_ = tokenizer(['''hi''', '''I went to the gym''', ''''''] )
self.assertListEqual(batch_with_eos_added['''input_ids'''] , batch_without_eos_added['''input_ids'''] )
def _UpperCamelCase ( self ) -> Any:
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = '''Unicode €.'''
SCREAMING_SNAKE_CASE_ = tokenizer(_A )
SCREAMING_SNAKE_CASE_ = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['''input_ids'''] , _A )
# decoding
SCREAMING_SNAKE_CASE_ = tokenizer.decode(_A )
self.assertEqual(_A , '''Unicode €.</s>''' )
SCREAMING_SNAKE_CASE_ = tokenizer('''e è é ê ë''' )
SCREAMING_SNAKE_CASE_ = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['''input_ids'''] , _A )
# decoding
SCREAMING_SNAKE_CASE_ = tokenizer.decode(_A )
self.assertEqual(_A , '''e è é ê ë</s>''' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''e è é ê ë</s>''' )
def _UpperCamelCase ( self ) -> List[str]:
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
SCREAMING_SNAKE_CASE_ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
SCREAMING_SNAKE_CASE_ = tokenizer(_A , padding=_A , return_tensors=_A )
self.assertIsInstance(_A , _A )
if FRAMEWORK != "jax":
SCREAMING_SNAKE_CASE_ = list(batch.input_ids.numpy()[0] )
else:
SCREAMING_SNAKE_CASE_ = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_A , _A )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
SCREAMING_SNAKE_CASE_ = tokenizer(_A , padding=_A , return_tensors=_A )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , _A )
self.assertIn('''attention_mask''' , _A )
self.assertNotIn('''decoder_input_ids''' , _A )
self.assertNotIn('''decoder_attention_mask''' , _A )
def _UpperCamelCase ( self ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = [
'''Summary of the text.''',
'''Another summary.''',
]
SCREAMING_SNAKE_CASE_ = tokenizer(
text_target=_A , max_length=32 , padding='''max_length''' , truncation=_A , return_tensors=_A )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
def _UpperCamelCase ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = ['''A long paragraph for summarization. </s>''']
SCREAMING_SNAKE_CASE_ = ['''Summary of the text. </s>''']
# fmt: off
SCREAMING_SNAKE_CASE_ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
SCREAMING_SNAKE_CASE_ = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
SCREAMING_SNAKE_CASE_ = tokenizer(_A , text_target=_A )
self.assertEqual(_A , batch['''input_ids'''][0] )
self.assertEqual(_A , batch['''labels'''][0] )
def _UpperCamelCase ( self ) -> Dict:
# safety check on max_len default value so we are sure the test works
SCREAMING_SNAKE_CASE_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
SCREAMING_SNAKE_CASE_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = ''' He is very happy, UNwant\u00E9d,running'''
SCREAMING_SNAKE_CASE_ = tokenizer.encode(_A , add_special_tokens=_A )
tokenizer.save_pretrained(_A )
SCREAMING_SNAKE_CASE_ = tokenizer.__class__.from_pretrained(_A )
SCREAMING_SNAKE_CASE_ = after_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
shutil.rmtree(_A )
SCREAMING_SNAKE_CASE_ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''] )
SCREAMING_SNAKE_CASE_ = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''' )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
SCREAMING_SNAKE_CASE_ = tokenizer.encode(_A , add_special_tokens=_A )
tokenizer.save_pretrained(_A )
SCREAMING_SNAKE_CASE_ = tokenizer.__class__.from_pretrained(_A )
SCREAMING_SNAKE_CASE_ = after_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
SCREAMING_SNAKE_CASE_ = tokenizer.__class__.from_pretrained(_A , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_A )
def _UpperCamelCase ( self ) -> int:
SCREAMING_SNAKE_CASE_ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_A )
with open(os.path.join(_A , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
SCREAMING_SNAKE_CASE_ = json.load(_A )
with open(os.path.join(_A , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
SCREAMING_SNAKE_CASE_ = json.load(_A )
SCREAMING_SNAKE_CASE_ = [F'''<extra_id_{i}>''' for i in range(125 )]
SCREAMING_SNAKE_CASE_ = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
SCREAMING_SNAKE_CASE_ = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(_A , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(_A , _A )
with open(os.path.join(_A , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(_A , _A )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
SCREAMING_SNAKE_CASE_ = tokenizer_class.from_pretrained(
_A , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
SCREAMING_SNAKE_CASE_ = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=_A )]
SCREAMING_SNAKE_CASE_ = tokenizer_class.from_pretrained(
_A , additional_special_tokens=_A , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE_ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_A )
SCREAMING_SNAKE_CASE_ = tokenizer_class.from_pretrained(_A )
self.assertTrue(tokenizer.decode([255] ) == '''''' )
def _UpperCamelCase ( self ) -> int:
pass
def _UpperCamelCase ( self ) -> Any:
pass
def _UpperCamelCase ( self ) -> Any:
pass
def _UpperCamelCase ( self ) -> Optional[int]:
pass
def _UpperCamelCase ( self ) -> Union[str, Any]:
# The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
# and special added tokens as tokens
SCREAMING_SNAKE_CASE_ = self.get_tokenizers(fast=_A , do_lower_case=_A )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
SCREAMING_SNAKE_CASE_ = ['''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''x''', '''t''', '''</s>''']
SCREAMING_SNAKE_CASE_ = tokenizer.convert_tokens_to_string(_A )
self.assertIsInstance(_A , _A )
def _UpperCamelCase ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
SCREAMING_SNAKE_CASE_ = [
'''bos_token''',
'''eos_token''',
'''unk_token''',
'''sep_token''',
'''pad_token''',
'''cls_token''',
'''mask_token''',
]
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = tokenizer.convert_ids_to_tokens(
_A , skip_special_tokens=_A )
for attr in attributes_list:
setattr(_A , attr + '''_id''' , _A )
self.assertEqual(getattr(_A , _A ) , _A )
self.assertEqual(getattr(_A , attr + '''_id''' ) , _A )
setattr(_A , attr + '''_id''' , _A )
self.assertEqual(getattr(_A , _A ) , _A )
self.assertEqual(getattr(_A , attr + '''_id''' ) , _A )
setattr(_A , '''additional_special_tokens_ids''' , [] )
self.assertListEqual(getattr(_A , '''additional_special_tokens''' ) , [] )
self.assertListEqual(getattr(_A , '''additional_special_tokens_ids''' ) , [] )
setattr(_A , '''additional_special_tokens_ids''' , [token_id_to_test_setters] )
self.assertListEqual(getattr(_A , '''additional_special_tokens''' ) , [token_to_test_setters] )
self.assertListEqual(getattr(_A , '''additional_special_tokens_ids''' ) , [token_id_to_test_setters] )
"""Project Euler 71: numerator of the reduced proper fraction immediately to
the left of numerator/denominator when listing fractions with denominators up
to `limit` in ascending order."""


def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000) -> int:
    max_numerator = 0
    max_denominator = 1

    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator


if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1_000_000))
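

# Hedged worked example (added): with denominators up to 8, the fraction
# immediately to the left of 3/7 is 2/5, so the numerator returned is 2.
assert solution(numerator=3, denominator=7, limit=8) == 2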
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    img = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
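

# Hedged alternative (added; not in the original): NumPy broadcasting performs
# the same inversion without the Python-level double loop.
def convert_to_negative_vectorized(img):
    return 255 - img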
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image):
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def __lowerCamelCase ( self : Any , _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : Tuple):
'''simple docstring'''
__lowercase =DepthEstimationPipeline(model=_A , image_processor=_A)
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def __lowerCamelCase ( self : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int]):
'''simple docstring'''
__lowercase =depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png')
self.assertEqual({'predicted_depth': ANY(torch.Tensor), 'depth': ANY(Image.Image)} , _A)
import datasets
__lowercase =datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test')
__lowercase =depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png'),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
])
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor), 'depth': ANY(Image.Image)},
{'predicted_depth': ANY(torch.Tensor), 'depth': ANY(Image.Image)},
{'predicted_depth': ANY(torch.Tensor), 'depth': ANY(Image.Image)},
{'predicted_depth': ANY(torch.Tensor), 'depth': ANY(Image.Image)},
{'predicted_depth': ANY(torch.Tensor), 'depth': ANY(Image.Image)},
] , _A , )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF')
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
pass
@slow
@require_torch
def __lowerCamelCase ( self : int):
'''simple docstring'''
__lowercase ='Intel/dpt-large'
__lowercase =pipeline('depth-estimation' , model=_A)
__lowercase =depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg')
__lowercase =hashimage(outputs['depth'])
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item()) , 29.304)
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item()) , 2.662)
@require_torch
def __lowerCamelCase ( self : Any):
'''simple docstring'''
self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT')
import math


def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0

    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
'''simple docstring'''
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wavavec = UniSpeechSatForCTC(config)
    else:
        hf_wavavec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wavavec)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_unispeech_sat_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
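

# A minimal sketch (not part of the conversion script itself) of the dotted-key
# module traversal that `set_recursively` above relies on; `module` and `key`
# here are illustrative placeholders, not names from the original file.
def resolve_pointer(module, key):
    """Walk a dotted attribute path, e.g. resolve_pointer(model, "encoder.layers.0")."""
    pointer = module
    for attribute in key.split("."):
        pointer = getattr(pointer, attribute)
    return pointer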
| 309
|
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
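

# Quick self-check sketch for the converter above on a textbook expression (the
# call also prints the conversion trace table as a side effect); it is defined
# rather than executed so the interactive prompt above keeps working.
def _converter_sanity_check() -> None:
    assert infix_2_prefix("a+b*(c^d-e)") == "+a*b-^cde"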
| 299
| 0
|
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """
    Return base^exponent % modulo_value, computed by repeated squaring.

    >>> _modexpt(2, 4, 10)
    6
    >>> _modexpt(13, 65535, 7)
    6
    """
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """
    Return the last `digits` digits of the hyperexponentiation tower
    base^^height (Project Euler problem 188).

    >>> solution(3, 2)
    27
    """
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
    print(f"{solution() = }")
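
    # Sanity sketch: a single step of _modexpt should agree with Python's
    # built-in three-argument pow (the recursion above is square-and-multiply).
    assert _modexpt(1777, 1855, 10**8) == pow(1777, 1855, 10**8)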
| 274
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "is_longer"]
    def __init__(
        self,
        feature_size=64,
        sampling_rate=48_000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 14_000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ) -> None:
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm=None,
            mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, "hann"),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel="dB",
        )
        return log_mel_spectrogram.T

    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer
    def __call__(
        self,
        raw_speech,
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], list):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
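

# Illustrative sketch (separate from the extractor) of the "repeatpad" padding
# strategy used above: a short waveform is tiled, then zero-padded up to
# max_length, matching the corresponding branch in `_get_input_mel`.
def _repeatpad_sketch(waveform: np.ndarray, max_length: int) -> np.ndarray:
    n_repeat = int(max_length / len(waveform))
    repeated = np.tile(waveform, n_repeat)
    return np.pad(repeated, (0, max_length - repeated.shape[0]), mode="constant", constant_values=0)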
| 299
| 0
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__A = logging.get_logger(__name__)
class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 148
|
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of `value`, or its derivative when `deriv` is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Train a single weight so the output approaches `expected` (a percentage)."""
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
| 299
| 0
|
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
["""en-ru""", 2_6.0],
["""ru-en""", 2_2.0],
["""en-de""", 2_2.0],
["""de-en""", 2_9.0],
] )
@slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
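

# `calculate_bleu` is imported from the local examples `utils` module; for
# reference, a hedged stand-in with the same contract (assuming sacrebleu is
# available in the test environment) would look like this:
def _calculate_bleu_sketch(output_lns, refs_lns):
    import sacrebleu

    return {"bleu": round(sacrebleu.corpus_bleu(output_lns, [refs_lns]).score, 4)}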
| 229
|
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 299
| 0
|
def reverse_long_words(sentence: str) -> str:
    """
    Reverse all words that are longer than 4 characters in a sentence.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
| 205
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = TaConfig(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = TaBlock(t5config)
            self.encoders.append(lyr)

        self.layer_norm = TaLayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
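

# Sketch of what `get_extended_attention_mask` (from ModuleUtilsMixin) produces
# for the encoder above: a (batch, seq) padding mask is broadcast to
# (batch, 1, 1, seq) and turned into an additive bias (0 keeps a position, a
# large negative value drops it) that is summed onto the attention logits.
def _extended_attention_mask_sketch(mask: torch.Tensor, dtype=torch.float32) -> torch.Tensor:
    extended = mask[:, None, None, :].to(dtype)
    return (1.0 - extended) * torch.finfo(dtype).min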
| 299
| 0
|
"""simple docstring"""
import math
def main() -> None:
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")

    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")


def encrypt_message(key: int, message: str) -> str:
    """
    >>> encrypt_message(6, 'Harshil Darji')
    'Hlia rDsahrij'
    """
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)


def decrypt_message(key: int, message: str) -> str:
    """
    >>> decrypt_message(6, 'Hlia rDsahrij')
    'Harshil Darji'
    """
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0

    for symbol in message:
        plain_text[col] += symbol
        col += 1

        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1

    return "".join(plain_text)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
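

# Hedged round-trip check for the pair above (defined but not invoked here,
# since main() is interactive): decrypting an encryption must return the
# original message for any valid key.
def _cipher_round_trip(message: str = "Common sense is not so common.", key: int = 8) -> bool:
    return decrypt_message(key, encrypt_message(key, message)) == message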
| 347
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class WavaVecaProcessor(ProcessorMixin):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )

            feature_extractor = WavaVecaFeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = WavaVecaCTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)

            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
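

# Hedged usage sketch for the processor above (the model name is illustrative
# and requires network access or cached weights, so the function is defined but
# not invoked here):
def _processor_usage_sketch():
    processor = WavaVecaProcessor.from_pretrained("facebook/wav2vec2-base-960h")
    inputs = processor(audio=[0.0] * 16_000, sampling_rate=16_000, return_tensors="pt")
    # Passing `text=` replaces the deprecated `as_target_processor` context manager.
    labels = processor(text="HELLO WORLD").input_ids
    return inputs, labels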
| 299
| 0
|
import functools
from typing import Any
def word_break(string: str, words: list) -> bool:
    """
    Return True if `string` can be segmented into a space-separated
    sequence of one or more `words`.
    """
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]

        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)

            if trie_node is None:
                return False

            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
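

# Quick checks for the trie-backed DP above (classic word-break cases):
assert word_break("applepenapple", ["apple", "pen"]) is True
assert word_break("catsandog", ["cats", "dog", "sand", "and", "cat"]) is False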
| 231
|
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
@property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
@property
    def header(self) -> Dict:
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
@property
    def failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
F''' {self.time}.'''
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
@property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
for category, failures in category_failures.items():
if len(_A ) == 0:
continue
if report != "":
report += "\n\n"
report += F'''*{category} failures*:'''.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(_A )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'''The following examples had failures:\n\n\n{report}\n''',
},
}
@property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)
@staticmethod
    def error_out() -> Any:
        payload = [
{
'''type''': '''section''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''There was an issue running the tests.''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
'''url''': F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
]
        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))
        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )
    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}
def retrieve_artifact(artifact_path):
    _artifact = {}

    if os.path.exists(artifact_path):
        files = os.listdir(artifact_path)
        for file in files:
            try:
                with open(os.path.join(artifact_path, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(artifact_path, file)}.") from e

    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
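
    # Minimal check of the stats parser above on a typical pytest summary tail
    # (the exact summary format is assumed from real CI output):
    _failed, _success, _time = handle_test_results("== 2 failed, 98 passed in 113.16s ==")
    assert (_failed, _success, _time) == (2, 98, "113.16s")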
| 299
| 0
|
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16_000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"
    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
def _lowerCamelCase ( self :str ) -> Optional[int]:
__UpperCamelCase : int = self.get_tokenizer()
__UpperCamelCase : List[Any] = self.get_feature_extractor()
__UpperCamelCase : int = self.get_decoder()
__UpperCamelCase : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
processor.save_pretrained(self.tmpdirname )
__UpperCamelCase : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , _A )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _A )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , _A )
def _lowerCamelCase ( self :int ) -> List[Any]:
__UpperCamelCase : Optional[Any] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__UpperCamelCase : Any = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def _lowerCamelCase ( self :Optional[Any] ) -> Dict:
__UpperCamelCase : str = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["xx"] )
with self.assertRaisesRegex(_A , "include" ):
WavaVecaProcessorWithLM(
tokenizer=_A , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def _lowerCamelCase ( self :List[str] ) -> str:
__UpperCamelCase : List[str] = self.get_feature_extractor()
__UpperCamelCase : Union[str, Any] = self.get_tokenizer()
__UpperCamelCase : Tuple = self.get_decoder()
__UpperCamelCase : int = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
__UpperCamelCase : Union[str, Any] = floats_list((3, 1_0_0_0) )
__UpperCamelCase : str = feature_extractor(_A , return_tensors="np" )
__UpperCamelCase : Union[str, Any] = processor(_A , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _lowerCamelCase ( self :int ) -> Dict:
__UpperCamelCase : List[str] = self.get_feature_extractor()
__UpperCamelCase : Tuple = self.get_tokenizer()
__UpperCamelCase : List[str] = self.get_decoder()
__UpperCamelCase : Dict = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
__UpperCamelCase : Dict = "This is a test string"
__UpperCamelCase : Optional[Any] = processor(text=_A )
__UpperCamelCase : Dict = tokenizer(_A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)
def _lowerCamelCase ( self :Optional[int] ) -> Optional[int]:
__UpperCamelCase : List[Any] = self.get_feature_extractor()
__UpperCamelCase : List[Any] = self.get_tokenizer()
__UpperCamelCase : List[Any] = self.get_decoder()
__UpperCamelCase : int = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
__UpperCamelCase : str = self._get_dummy_logits(shape=(1_0, 1_6) , seed=1_3 )
__UpperCamelCase : List[str] = processor.decode(_A )
__UpperCamelCase : Optional[int] = decoder.decode_beams(_A )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("</s> <s> </s>" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["fork"], ["spawn"]] )
def _lowerCamelCase ( self :List[str] , a :Optional[Any] ) -> Dict:
__UpperCamelCase : Dict = self.get_feature_extractor()
__UpperCamelCase : Optional[int] = self.get_tokenizer()
__UpperCamelCase : Any = self.get_decoder()
__UpperCamelCase : str = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
__UpperCamelCase : str = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__UpperCamelCase : Tuple = processor.batch_decode(_A )
else:
with get_context(_A ).Pool() as pool:
__UpperCamelCase : Optional[Any] = processor.batch_decode(_A , _A )
__UpperCamelCase : List[Any] = list(_A )
with get_context("fork" ).Pool() as p:
__UpperCamelCase : List[Any] = decoder.decode_beams_batch(_A , _A )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Tuple = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_A , decoded_processor.text )
self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"] , decoded_processor.text )
self.assertListEqual(_A , decoded_processor.logit_score )
self.assertListEqual(_A , decoded_processor.lm_score )
def _lowerCamelCase ( self :Dict ) -> int:
__UpperCamelCase : Tuple = self.get_feature_extractor()
__UpperCamelCase : Optional[int] = self.get_tokenizer()
__UpperCamelCase : Optional[int] = self.get_decoder()
__UpperCamelCase : int = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
__UpperCamelCase : str = self._get_dummy_logits()
__UpperCamelCase : List[str] = 1_5
__UpperCamelCase : int = -20.0
__UpperCamelCase : List[Any] = -4.0
__UpperCamelCase : Optional[Any] = processor.batch_decode(
_A , beam_width=_A , beam_prune_logp=_A , token_min_logp=_A , )
__UpperCamelCase : List[str] = decoded_processor_out.text
__UpperCamelCase : int = list(_A )
with get_context("fork" ).Pool() as pool:
__UpperCamelCase : str = decoder.decode_beams_batch(
_A , _A , beam_width=_A , beam_prune_logp=_A , token_min_logp=_A , )
__UpperCamelCase : Dict = [d[0][0] for d in decoded_decoder_out]
__UpperCamelCase : str = [d[0][2] for d in decoded_decoder_out]
__UpperCamelCase : int = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_A , _A )
self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"] , _A )
self.assertTrue(np.array_equal(_A , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , _A , atol=1E-3 ) )
self.assertTrue(np.array_equal(_A , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , _A , atol=1E-3 ) )
def _lowerCamelCase ( self :Optional[int] ) -> int:
__UpperCamelCase : Tuple = self.get_feature_extractor()
__UpperCamelCase : Optional[Any] = self.get_tokenizer()
__UpperCamelCase : Any = self.get_decoder()
__UpperCamelCase : List[str] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
__UpperCamelCase : int = self._get_dummy_logits()
__UpperCamelCase : str = 2.0
__UpperCamelCase : List[str] = 5.0
__UpperCamelCase : str = -20.0
__UpperCamelCase : Union[str, Any] = True
__UpperCamelCase : Optional[Any] = processor.batch_decode(
_A , alpha=_A , beta=_A , unk_score_offset=_A , lm_score_boundary=_A , )
__UpperCamelCase : Tuple = decoded_processor_out.text
__UpperCamelCase : Optional[int] = list(_A )
decoder.reset_params(
alpha=_A , beta=_A , unk_score_offset=_A , lm_score_boundary=_A , )
with get_context("fork" ).Pool() as pool:
__UpperCamelCase : Optional[int] = decoder.decode_beams_batch(
_A , _A , )
__UpperCamelCase : Union[str, Any] = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_A , _A )
self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"] , _A )
__UpperCamelCase : List[Any] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , _A )
def _lowerCamelCase ( self :str ) -> Optional[int]:
__UpperCamelCase : List[str] = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
__UpperCamelCase : Dict = processor.decoder.model_container[processor.decoder._model_key]
__UpperCamelCase : Any = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
__UpperCamelCase : Union[str, Any] = os.listdir(_A )
__UpperCamelCase : Union[str, Any] = ["alphabet.json", "language_model"]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_A , _A )
def _lowerCamelCase ( self :Optional[Any] ) -> Optional[Any]:
__UpperCamelCase : int = snapshot_download("hf-internal-testing/processor_with_lm" )
__UpperCamelCase : int = WavaVecaProcessorWithLM.from_pretrained(_A )
__UpperCamelCase : List[str] = processor.decoder.model_container[processor.decoder._model_key]
__UpperCamelCase : Optional[int] = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
__UpperCamelCase : Optional[int] = os.listdir(_A )
__UpperCamelCase : List[str] = os.listdir(_A )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_A , _A )
def _lowerCamelCase ( self :Optional[Any] ) -> List[Any]:
__UpperCamelCase : str = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
__UpperCamelCase : int = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm" )
__UpperCamelCase : Any = floats_list((3, 1_0_0_0) )
__UpperCamelCase : List[Any] = processor_wavaveca(_A , return_tensors="np" )
__UpperCamelCase : Tuple = processor_auto(_A , return_tensors="np" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
__UpperCamelCase : Union[str, Any] = self._get_dummy_logits()
__UpperCamelCase : Any = processor_wavaveca.batch_decode(_A )
__UpperCamelCase : Dict = processor_auto.batch_decode(_A )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def _lowerCamelCase ( self :str ) -> Optional[int]:
__UpperCamelCase : List[str] = self.get_feature_extractor()
__UpperCamelCase : List[Any] = self.get_tokenizer()
__UpperCamelCase : int = self.get_decoder()
__UpperCamelCase : List[Any] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , )
@staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def _lowerCamelCase ( self :Optional[int] ) -> str:
__UpperCamelCase : str = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
__UpperCamelCase : Optional[Any] = self._get_dummy_logits()[0]
__UpperCamelCase : int = processor.decode(_A , output_word_offsets=_A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(_A , _A ) )
self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"] , "word" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "end_offset" ) , [1, 3, 5] )
def _lowerCamelCase ( self :Tuple ) -> Optional[int]:
__UpperCamelCase : Dict = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
__UpperCamelCase : Union[str, Any] = self._get_dummy_logits()
__UpperCamelCase : Any = processor.batch_decode(_A , output_word_offsets=_A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(_A , _A ) )
self.assertListEqual(
[" ".join(self.get_from_offsets(_A , "word" ) ) for o in outputs["word_offsets"]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "end_offset" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def _lowerCamelCase ( self :List[str] ) -> int:
import torch
__UpperCamelCase : str = load_dataset("common_voice" , "en" , split="train" , streaming=_A )
__UpperCamelCase : Tuple = ds.cast_column("audio" , datasets.Audio(sampling_rate=1_6_0_0_0 ) )
__UpperCamelCase : Tuple = iter(_A )
__UpperCamelCase : str = next(_A )
__UpperCamelCase : Union[str, Any] = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
__UpperCamelCase : List[Any] = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__UpperCamelCase : Any = processor(sample["audio"]["array"] , return_tensors="pt" ).input_values
with torch.no_grad():
__UpperCamelCase : List[Any] = model(_A ).logits.cpu().numpy()
__UpperCamelCase : Any = processor.decode(logits[0] , output_word_offsets=_A )
__UpperCamelCase : Optional[int] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__UpperCamelCase : Optional[Any] = [
{
"start_time": d["start_offset"] * time_offset,
"end_time": d["end_offset"] * time_offset,
"word": d["word"],
}
for d in output["word_offsets"]
]
__UpperCamelCase : List[Any] = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"
# output words
self.assertEqual(" ".join(self.get_from_offsets(_A , "word" ) ) , _A )
self.assertEqual(" ".join(self.get_from_offsets(_A , "word" ) ) , output.text )
# output times
__UpperCamelCase : Tuple = torch.tensor(self.get_from_offsets(_A , "start_time" ) )
__UpperCamelCase : List[str] = torch.tensor(self.get_from_offsets(_A , "end_time" ) )
# fmt: off
__UpperCamelCase : Any = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
__UpperCamelCase : List[str] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(_A , _A , atol=0.01 ) )
self.assertTrue(torch.allclose(_A , _A , atol=0.01 ) )
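

# Sketch of the offset-to-seconds conversion exercised in the last test above:
# Wav2Vec2-style feature encoders downsample by `inputs_to_logits_ratio` (320
# for the base architecture, an assumed value here), so at 16 kHz each logit
# frame spans 320 / 16000 = 0.02 s.
def _frames_to_seconds(frame_offset: int, inputs_to_logits_ratio: int = 320, sampling_rate: int = 16_000) -> float:
    return frame_offset * inputs_to_logits_ratio / sampling_rate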
| 232
|
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(grid, init, goal, cost, heuristic):
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i

    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
__UpperCAmelCase = [
[0, 1, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0],  # 0s are free cells whereas 1s are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
__UpperCAmelCase = [0, 0]
# all coordinates are given in format [y,x]
__UpperCAmelCase = [len(grid) - 1, len(grid[0]) - 1]
__UpperCAmelCase = 1
# the cost map which pushes the path closer to the goal
__UpperCAmelCase = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
__UpperCAmelCase = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
__UpperCAmelCase = 99
__UpperCAmelCase , __UpperCAmelCase = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 299
| 0
|
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
lowerCamelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
def try_infer_format_from_ext(path: str) -> str:
    if not path:
        return "pipe"
    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext
    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
    )
def run_command_factory(args) -> "RunCommand":
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format,
        output_path=args.output,
        input_path=args.input,
        column=args.column if args.column else nlp.default_input_names,
        overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)
class RunCommand(BaseTransformersCLICommand):
    """simple docstring"""
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader
@staticmethod
    def register_subcommand(parser: ArgumentParser):
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
        run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
        run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
        run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
        )
        run_parser.add_argument(
            "--column",
            type=str,
            help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
        )
        run_parser.add_argument(
            "--format",
            type=str,
            default="infer",
            choices=PipelineDataFormat.SUPPORTED_FORMATS,
            help="Input format to read from",
        )
        run_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)
    def run(self) -> None:
        nlp, outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output
        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
| 124
|
from __future__ import annotations
from collections.abc import Callable
__UpperCAmelCase = list[list[float | int]]
def A__ ( __lowerCamelCase, __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = len(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ = [[0 for _ in range(size + 1 )] for _ in range(__lowerCamelCase )]
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
for row in range(__lowerCamelCase ):
for col in range(__lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = matrix[row][col]
SCREAMING_SNAKE_CASE_ = vector[row][0]
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 0
while row < size and col < size:
# pivoting
SCREAMING_SNAKE_CASE_ = max((abs(augmented[rowa][col] ), rowa) for rowa in range(__lowerCamelCase, __lowerCamelCase ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = augmented[pivot_row], augmented[row]
for rowa in range(row + 1, __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = augmented[rowa][col] / augmented[row][col]
SCREAMING_SNAKE_CASE_ = 0
for cola in range(col + 1, size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1, __lowerCamelCase ):
for row in range(__lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = augmented[row][col] / augmented[col][col]
for cola in range(__lowerCamelCase, size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row], 10 )] for row in range(__lowerCamelCase )
]
def A__ ( __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = len(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ = [[0 for _ in range(__lowerCamelCase )] for _ in range(__lowerCamelCase )]
SCREAMING_SNAKE_CASE_ = [[0] for _ in range(__lowerCamelCase )]
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
for x_val, y_val in enumerate(__lowerCamelCase ):
for col in range(__lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = (x_val + 1) ** (size - col - 1)
SCREAMING_SNAKE_CASE_ = y_val
SCREAMING_SNAKE_CASE_ = solve(__lowerCamelCase, __lowerCamelCase )
def interpolated_func(__lowerCamelCase ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(__lowerCamelCase ) )
return interpolated_func
def A__ ( __lowerCamelCase ):
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def A__ ( __lowerCamelCase = question_function, __lowerCamelCase = 10 ):
SCREAMING_SNAKE_CASE_ = [func(__lowerCamelCase ) for x_val in range(1, order + 1 )]
SCREAMING_SNAKE_CASE_ = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1, order + 1 )
]
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
for poly in polynomials:
SCREAMING_SNAKE_CASE_ = 1
while func(__lowerCamelCase ) == poly(__lowerCamelCase ):
x_val += 1
ret += poly(__lowerCamelCase )
return ret
if __name__ == "__main__":
print(F"""{solution() = }""")
| 299
| 0
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
lowerCAmelCase__ = '''pytorch_model.bin'''
lowerCAmelCase__ = '''pytorch_model.bin.index.json'''
lowerCAmelCase__ = '''adapter_config.json'''
lowerCAmelCase__ = '''adapter_model.bin'''
lowerCAmelCase__ = '''adapter_model.safetensors'''
lowerCAmelCase__ = '''tf_model.h5'''
lowerCAmelCase__ = '''tf_model.h5.index.json'''
lowerCAmelCase__ = '''model.ckpt'''
lowerCAmelCase__ = '''flax_model.msgpack'''
lowerCAmelCase__ = '''flax_model.msgpack.index.json'''
lowerCAmelCase__ = '''model.safetensors'''
lowerCAmelCase__ = '''model.safetensors.index.json'''
lowerCAmelCase__ = '''config.json'''
lowerCAmelCase__ = '''preprocessor_config.json'''
lowerCAmelCase__ = FEATURE_EXTRACTOR_NAME
lowerCAmelCase__ = '''generation_config.json'''
lowerCAmelCase__ = '''modelcard.json'''
lowerCAmelCase__ = '''▁'''
lowerCAmelCase__ = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
lowerCAmelCase__ = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
lowerCAmelCase__ = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
lowerCAmelCase__ = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def _A ( A__ ):
"""simple docstring"""
if version.parse(__lowerCamelCase ) < version.parse(__lowerCamelCase ):
if "dev" in min_version:
            error_message = (
                '''This example requires a source install from HuggingFace Transformers (see '''
                '''`https://huggingface.co/docs/transformers/installation#install-from-source`),'''
            )
        else:
            error_message = F"This example requires a minimum version of {min_version},"
error_message += F" but the version found is {__version__}.\n"
raise ImportError(
error_message
+ '''Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other '''
'''versions of HuggingFace Transformers.''' )
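# Hypothetical usage sketch: the helper above mirrors `check_min_version` from
# the transformers examples (the name is assumed here, since this snippet's
# own identifier is mangled). A caller would do e.g.:
#
#     check_min_version("4.21.0")  # raises ImportError on an older install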
| 104
|
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
__UpperCAmelCase = logging.getLogger(__name__)
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ ="summarization"
UpperCAmelCase_ =["loss"]
UpperCAmelCase_ =ROUGE_KEYS
UpperCAmelCase_ ="rouge2"
def __init__( self , _A , **_A ) -> Tuple:
if hparams.sortish_sampler and hparams.gpus > 1:
SCREAMING_SNAKE_CASE_ = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('''Dynamic Batch size does not work for multi-gpu training''' )
if hparams.sortish_sampler:
raise ValueError('''--sortish_sampler and --max_tokens_per_batch may not be used simultaneously''' )
super().__init__(_A , num_labels=_A , mode=self.mode , **_A )
use_task_specific_params(self.model , '''summarization''' )
save_git_info(self.hparams.output_dir )
SCREAMING_SNAKE_CASE_ = Path(self.output_dir ) / '''metrics.json'''
SCREAMING_SNAKE_CASE_ = Path(self.output_dir ) / '''hparams.pkl'''
pickle_save(self.hparams , self.hparams_save_path )
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = defaultdict(_A )
SCREAMING_SNAKE_CASE_ = self.config.model_type
SCREAMING_SNAKE_CASE_ = self.config.tgt_vocab_size if self.model_type == '''fsmt''' else self.config.vocab_size
SCREAMING_SNAKE_CASE_ = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
SCREAMING_SNAKE_CASE_ = {
'''train''': self.hparams.n_train,
'''val''': self.hparams.n_val,
'''test''': self.hparams.n_test,
}
SCREAMING_SNAKE_CASE_ = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
SCREAMING_SNAKE_CASE_ = {
'''train''': self.hparams.max_target_length,
'''val''': self.hparams.val_max_target_length,
'''test''': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F'''target_lens: {self.target_lens}'''
assert self.target_lens["train"] <= self.target_lens["test"], F'''target_lens: {self.target_lens}'''
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
SCREAMING_SNAKE_CASE_ = get_git_info()['''repo_sha''']
SCREAMING_SNAKE_CASE_ = hparams.num_workers
SCREAMING_SNAKE_CASE_ = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , _A ):
SCREAMING_SNAKE_CASE_ = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
SCREAMING_SNAKE_CASE_ = self.decoder_start_token_id
SCREAMING_SNAKE_CASE_ = (
SeqaSeqDataset if hasattr(self.tokenizer , '''prepare_seq2seq_batch''' ) else LegacySeqaSeqDataset
)
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
SCREAMING_SNAKE_CASE_ = self.hparams.eval_max_gen_length
else:
SCREAMING_SNAKE_CASE_ = self.model.config.max_length
SCREAMING_SNAKE_CASE_ = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def _UpperCamelCase ( self , _A ) -> Dict[str, List[str]]:
SCREAMING_SNAKE_CASE_ = {
k: self.tokenizer.batch_decode(v.tolist() ) if '''mask''' not in k else v.shape for k, v in batch.items()
}
save_json(_A , Path(self.output_dir ) / '''text_batch.json''' )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / '''tok_batch.json''' )
SCREAMING_SNAKE_CASE_ = True
return readable_batch
def _UpperCamelCase ( self , _A , **_A ) -> List[str]:
return self.model(_A , **_A )
def _UpperCamelCase ( self , _A ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = self.tokenizer.batch_decode(
_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A )
return lmap(str.strip , _A )
def _UpperCamelCase ( self , _A ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.tokenizer.pad_token_id
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = batch['''input_ids'''], batch['''attention_mask''']
SCREAMING_SNAKE_CASE_ = batch['''labels''']
if isinstance(self.model , _A ):
SCREAMING_SNAKE_CASE_ = self.model._shift_right(_A )
else:
SCREAMING_SNAKE_CASE_ = shift_tokens_right(_A , _A )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
SCREAMING_SNAKE_CASE_ = decoder_input_ids
self.save_readable_batch(_A )
SCREAMING_SNAKE_CASE_ = self(_A , attention_mask=_A , decoder_input_ids=_A , use_cache=_A )
SCREAMING_SNAKE_CASE_ = outputs['''logits''']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
SCREAMING_SNAKE_CASE_ = nn.CrossEntropyLoss(ignore_index=_A )
assert lm_logits.shape[-1] == self.vocab_size
SCREAMING_SNAKE_CASE_ = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
SCREAMING_SNAKE_CASE_ = nn.functional.log_softmax(_A , dim=-1 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = label_smoothed_nll_loss(
_A , _A , self.hparams.label_smoothing , ignore_index=_A )
return (loss,)
@property
def _UpperCamelCase ( self ) -> int:
return self.tokenizer.pad_token_id
def _UpperCamelCase ( self , _A , _A ) -> Dict:
SCREAMING_SNAKE_CASE_ = self._step(_A )
SCREAMING_SNAKE_CASE_ = dict(zip(self.loss_names , _A ) )
# tokens per batch
SCREAMING_SNAKE_CASE_ = batch['''input_ids'''].ne(self.pad ).sum() + batch['''labels'''].ne(self.pad ).sum()
SCREAMING_SNAKE_CASE_ = batch['''input_ids'''].shape[0]
SCREAMING_SNAKE_CASE_ = batch['''input_ids'''].eq(self.pad ).sum()
SCREAMING_SNAKE_CASE_ = batch['''input_ids'''].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def _UpperCamelCase ( self , _A , _A ) -> Dict:
return self._generative_step(_A )
def _UpperCamelCase ( self , _A , _A="val" ) -> Dict:
self.step_count += 1
SCREAMING_SNAKE_CASE_ = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
SCREAMING_SNAKE_CASE_ = losses['''loss''']
SCREAMING_SNAKE_CASE_ = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['''gen_time''', '''gen_len''']
}
SCREAMING_SNAKE_CASE_ = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
SCREAMING_SNAKE_CASE_ = torch.tensor(_A ).type_as(_A )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(_A )
SCREAMING_SNAKE_CASE_ = {F'''{prefix}_avg_{k}''': x for k, x in losses.items()}
SCREAMING_SNAKE_CASE_ = self.step_count
self.metrics[prefix].append(_A ) # callback writes this to self.metrics_save_path
SCREAMING_SNAKE_CASE_ = flatten_list([x['''preds'''] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F'''{prefix}_loss''': loss,
F'''{prefix}_{self.val_metric}''': metric_tensor,
}
def _UpperCamelCase ( self , _A , _A ) -> Dict:
return calculate_rouge(_A , _A )
def _UpperCamelCase ( self , _A ) -> dict:
SCREAMING_SNAKE_CASE_ = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
SCREAMING_SNAKE_CASE_ = self.model.generate(
batch['''input_ids'''] , attention_mask=batch['''attention_mask'''] , use_cache=_A , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
SCREAMING_SNAKE_CASE_ = (time.time() - ta) / batch['''input_ids'''].shape[0]
SCREAMING_SNAKE_CASE_ = self.ids_to_clean_text(_A )
SCREAMING_SNAKE_CASE_ = self.ids_to_clean_text(batch['''labels'''] )
SCREAMING_SNAKE_CASE_ = self._step(_A )
SCREAMING_SNAKE_CASE_ = dict(zip(self.loss_names , _A ) )
SCREAMING_SNAKE_CASE_ = self.calc_generative_metrics(_A , _A )
SCREAMING_SNAKE_CASE_ = np.mean(lmap(_A , _A ) )
base_metrics.update(gen_time=_A , gen_len=_A , preds=_A , target=_A , **_A )
return base_metrics
def _UpperCamelCase ( self , _A , _A ) -> Any:
return self._generative_step(_A )
def _UpperCamelCase ( self , _A ) -> Optional[int]:
return self.validation_epoch_end(_A , prefix='''test''' )
def _UpperCamelCase ( self , _A ) -> SeqaSeqDataset:
SCREAMING_SNAKE_CASE_ = self.n_obs[type_path]
SCREAMING_SNAKE_CASE_ = self.target_lens[type_path]
SCREAMING_SNAKE_CASE_ = self.dataset_class(
self.tokenizer , type_path=_A , n_obs=_A , max_target_length=_A , **self.dataset_kwargs , )
return dataset
def _UpperCamelCase ( self , _A , _A , _A = False ) -> DataLoader:
SCREAMING_SNAKE_CASE_ = self.get_dataset(_A )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
SCREAMING_SNAKE_CASE_ = dataset.make_sortish_sampler(_A , distributed=self.hparams.gpus > 1 )
return DataLoader(
_A , batch_size=_A , collate_fn=dataset.collate_fn , shuffle=_A , num_workers=self.num_workers , sampler=_A , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
SCREAMING_SNAKE_CASE_ = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
_A , batch_sampler=_A , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
_A , batch_size=_A , collate_fn=dataset.collate_fn , shuffle=_A , num_workers=self.num_workers , sampler=_A , )
def _UpperCamelCase ( self ) -> DataLoader:
SCREAMING_SNAKE_CASE_ = self.get_dataloader('''train''' , batch_size=self.hparams.train_batch_size , shuffle=_A )
return dataloader
def _UpperCamelCase ( self ) -> DataLoader:
return self.get_dataloader('''val''' , batch_size=self.hparams.eval_batch_size )
def _UpperCamelCase ( self ) -> DataLoader:
return self.get_dataloader('''test''' , batch_size=self.hparams.eval_batch_size )
@staticmethod
def _UpperCamelCase ( _A , _A ) -> Dict:
BaseTransformer.add_model_specific_args(_A , _A )
add_generic_args(_A , _A )
parser.add_argument(
'''--max_source_length''' , default=1024 , type=_A , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--max_target_length''' , default=56 , type=_A , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--val_max_target_length''' , default=142 , type=_A , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--test_max_target_length''' , default=142 , type=_A , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument('''--freeze_encoder''' , action='''store_true''' )
parser.add_argument('''--freeze_embeds''' , action='''store_true''' )
parser.add_argument('''--sortish_sampler''' , action='''store_true''' , default=_A )
parser.add_argument('''--overwrite_output_dir''' , action='''store_true''' , default=_A )
parser.add_argument('''--max_tokens_per_batch''' , type=_A , default=_A )
parser.add_argument('''--logger_name''' , type=_A , choices=['''default''', '''wandb''', '''wandb_shared'''] , default='''default''' )
parser.add_argument('''--n_train''' , type=_A , default=-1 , required=_A , help='''# examples. -1 means use all.''' )
parser.add_argument('''--n_val''' , type=_A , default=500 , required=_A , help='''# examples. -1 means use all.''' )
parser.add_argument('''--n_test''' , type=_A , default=-1 , required=_A , help='''# examples. -1 means use all.''' )
        parser.add_argument(
            '''--task''' , type=_A , default='''summarization''' , required=_A , help='''Task to fine-tune on (e.g. summarization or translation).''' )
parser.add_argument('''--label_smoothing''' , type=_A , default=0.0 , required=_A )
parser.add_argument('''--src_lang''' , type=_A , default='''''' , required=_A )
parser.add_argument('''--tgt_lang''' , type=_A , default='''''' , required=_A )
parser.add_argument('''--eval_beams''' , type=_A , default=_A , required=_A )
parser.add_argument(
'''--val_metric''' , type=_A , default=_A , required=_A , choices=['''bleu''', '''rouge2''', '''loss''', None] )
parser.add_argument('''--eval_max_gen_length''' , type=_A , default=_A , help='''never generate more than n tokens''' )
parser.add_argument('''--save_top_k''' , type=_A , default=1 , required=_A , help='''How many checkpoints to save''' )
parser.add_argument(
'''--early_stopping_patience''' , type=_A , default=-1 , required=_A , help=(
'''-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'''
            ''' val_check_interval will affect it.'''
) , )
return parser
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ ="translation"
UpperCAmelCase_ =["loss"]
UpperCAmelCase_ =["bleu"]
UpperCAmelCase_ ="bleu"
def __init__( self , _A , **_A ) -> Optional[int]:
super().__init__(_A , **_A )
SCREAMING_SNAKE_CASE_ = hparams.src_lang
SCREAMING_SNAKE_CASE_ = hparams.tgt_lang
def _UpperCamelCase ( self , _A , _A ) -> dict:
return calculate_bleu(_A , _A )
def A__ ( __lowerCamelCase, __lowerCamelCase=None ):
Path(args.output_dir ).mkdir(exist_ok=__lowerCamelCase )
check_output_dir(__lowerCamelCase, expected_items=3 )
if model is None:
if "summarization" in args.task:
SCREAMING_SNAKE_CASE_ = SummarizationModule(__lowerCamelCase )
else:
SCREAMING_SNAKE_CASE_ = TranslationModule(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith('''/tmp''' )
or str(args.output_dir ).startswith('''/var''' )
):
SCREAMING_SNAKE_CASE_ = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
SCREAMING_SNAKE_CASE_ = os.environ.get('''WANDB_PROJECT''', __lowerCamelCase )
SCREAMING_SNAKE_CASE_ = WandbLogger(name=model.output_dir.name, project=__lowerCamelCase )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
SCREAMING_SNAKE_CASE_ = WandbLogger(name=model.output_dir.name, project=F'''hf_{dataset}''' )
if args.early_stopping_patience >= 0:
SCREAMING_SNAKE_CASE_ = get_early_stopping_callback(model.val_metric, args.early_stopping_patience )
else:
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = args.val_metric == '''loss'''
SCREAMING_SNAKE_CASE_ = generic_train(
__lowerCamelCase, __lowerCamelCase, logging_callback=SeqaSeqLoggingCallback(), checkpoint_callback=get_checkpoint_callback(
args.output_dir, model.val_metric, args.save_top_k, __lowerCamelCase ), early_stopping_callback=__lowerCamelCase, logger=__lowerCamelCase, )
pickle_save(model.hparams, model.output_dir / '''hparams.pkl''' )
if not args.do_predict:
return model
SCREAMING_SNAKE_CASE_ = ''''''
SCREAMING_SNAKE_CASE_ = sorted(glob.glob(os.path.join(args.output_dir, '''*.ckpt''' ), recursive=__lowerCamelCase ) )
if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
__UpperCAmelCase = pl.Trainer.add_argparse_args(parser)
__UpperCAmelCase = SummarizationModule.add_model_specific_args(parser, os.getcwd())
__UpperCAmelCase = parser.parse_args()
main(args)
| 299
| 0
|
'''simple docstring'''
def greatest_common_divisor(a: int, b: int) -> int:
    """Euclid's algorithm for the greatest common divisor."""
    while a != 0:
        a, b = b % a, a
    return b
def find_mod_inverse(a: int, m: int) -> int:
    """Modular inverse of a mod m via the extended Euclidean algorithm."""
    if greatest_common_divisor(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    (u1, u2, u3) = (1, 0, a)
    (v1, v2, v3) = (0, 1, m)
    while v3 != 0:
        q = u3 // v3
        (u1, u2, u3, v1, v2, v3) = ((u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3)
    return u1 % m
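if __name__ == "__main__":
    # Minimal sanity sketch (added): exercise the restored functions above.
    assert greatest_common_divisor(24, 36) == 12
    assert find_mod_inverse(3, 7) == 5  # since 3 * 5 = 15 == 1 (mod 7)
    assert (8 * find_mod_inverse(8, 29)) % 29 == 1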
| 166
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__UpperCAmelCase = {
"configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
"processing_layoutlmv2": ["LayoutLMv2Processor"],
"tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["LayoutLMv2TokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["LayoutLMv2FeatureExtractor"]
__UpperCAmelCase = ["LayoutLMv2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"LayoutLMv2ForQuestionAnswering",
"LayoutLMv2ForSequenceClassification",
"LayoutLMv2ForTokenClassification",
"LayoutLMv2Layer",
"LayoutLMv2Model",
"LayoutLMv2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 299
| 0
|
'''simple docstring'''
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1
def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("""Invalid inputs. Enter positive value.""")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("""Invalid inputs. Enter positive value.""")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
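    # Worked numeric check (an added sketch, not part of the original module):
    # one mole at 273.15 K in 0.022414 m^3 should give about one atmosphere.
    assert abs(pressure_of_gas_system(1, 273.15, 0.022414) - 101_325) < 1.0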
| 309
|
import functools
def minimum_tickets_cost(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError('''The parameter days should be a list of integers''')
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError('''The parameter costs should be a list of three integers''')
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError('''All days elements should be greater than 0''')
    if max(days) >= 366:
        raise ValueError('''All days elements should be less than 366''')
    days_set = set(days)
    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )
    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
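    # Classic worked example (added sketch): a 7-day pass bought on day 1 plus
    # single-day tickets on days 8 and 20 costs 7 + 2 + 2 = 11.
    assert minimum_tickets_cost([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11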
| 299
| 0
|
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
A : Any = '''http://www.mocksite.com/file1.txt'''
A : Tuple = '''\"text\": [\"foo\", \"foo\"]'''
A : Optional[int] = '''6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8'''
class A :
'''simple docstring'''
__lowerCamelCase : Optional[Any] = 200
__lowerCamelCase : int = {'''Content-Length''': '''100'''}
__lowerCamelCase : List[str] = {}
def a_ ( self : Union[str, Any] , **__lowerCAmelCase : str ) -> int:
"""simple docstring"""
return [bytes(_A , """utf-8""" )]
def __lowerCamelCase ( *__a :Tuple , **__a :Optional[Any] ) -> str:
"""simple docstring"""
return MockResponse()
@pytest.mark.parametrize("""urls_type""" , [str, list, dict] )
def __lowerCamelCase ( __a :Union[str, Any] , __a :int , __a :str ) -> str:
"""simple docstring"""
import requests
monkeypatch.setattr(__lowerCamelCase , """request""" , __lowerCamelCase )
A__ = URL
if issubclass(__lowerCamelCase , __lowerCamelCase ):
A__ = url
elif issubclass(__lowerCamelCase , __lowerCamelCase ):
A__ = [url]
elif issubclass(__lowerCamelCase , __lowerCamelCase ):
A__ = {"""train""": url}
A__ = """dummy"""
A__ = """downloads"""
A__ = tmp_path
A__ = DownloadConfig(
cache_dir=os.path.join(__lowerCamelCase , __lowerCamelCase ) , use_etag=__lowerCamelCase , )
A__ = DownloadManager(dataset_name=__lowerCamelCase , download_config=__lowerCamelCase )
A__ = dl_manager.download(__lowerCamelCase )
A__ = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(__lowerCamelCase , __lowerCamelCase ):
A__ = [downloaded_paths]
A__ = [urls]
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
assert "train" in downloaded_paths.keys()
A__ = downloaded_paths.values()
A__ = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(__lowerCamelCase , __lowerCamelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
A__ = Path(__lowerCamelCase )
A__ = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
A__ = downloaded_path.read_text()
assert content == CONTENT
A__ = downloaded_path.with_suffix(""".json""" )
assert metadata_downloaded_path.exists()
A__ = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("""paths_type""" , [str, list, dict] )
def __lowerCamelCase ( __a :Union[str, Any] , __a :List[Any] , __a :Optional[int] ) -> List[str]:
"""simple docstring"""
A__ = str(__lowerCamelCase )
if issubclass(__lowerCamelCase , __lowerCamelCase ):
A__ = filename
elif issubclass(__lowerCamelCase , __lowerCamelCase ):
A__ = [filename]
elif issubclass(__lowerCamelCase , __lowerCamelCase ):
A__ = {"""train""": filename}
A__ = """dummy"""
A__ = xz_file.parent
A__ = """extracted"""
A__ = DownloadConfig(
cache_dir=__lowerCamelCase , use_etag=__lowerCamelCase , )
A__ = DownloadManager(dataset_name=__lowerCamelCase , download_config=__lowerCamelCase )
A__ = dl_manager.extract(__lowerCamelCase )
A__ = paths
for extracted_paths in [extracted_paths]:
if isinstance(__lowerCamelCase , __lowerCamelCase ):
A__ = [extracted_paths]
A__ = [paths]
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
assert "train" in extracted_paths.keys()
A__ = extracted_paths.values()
A__ = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(__lowerCamelCase , __lowerCamelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
A__ = Path(__lowerCamelCase )
A__ = extracted_path.parts
assert parts[-1] == hash_url_to_filename(__lowerCamelCase , etag=__lowerCamelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
A__ = extracted_path.read_text()
A__ = text_file.read_text()
assert extracted_file_content == expected_file_content
def __lowerCamelCase ( __a :str , __a :List[Any] ) -> Any:
"""simple docstring"""
assert path.endswith(""".jsonl""" )
for num_items, line in enumerate(__lowerCamelCase , start=1 ):
A__ = json.loads(line.decode("""utf-8""" ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("""archive_jsonl""" , ["""tar_jsonl_path""", """zip_jsonl_path"""] )
def __lowerCamelCase ( __a :Union[str, Any] , __a :Optional[Any] ) -> Tuple:
"""simple docstring"""
A__ = request.getfixturevalue(__lowerCamelCase )
A__ = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(__lowerCamelCase ) , start=1 ):
_test_jsonl(__lowerCamelCase , __lowerCamelCase )
assert num_jsonl == 2
@pytest.mark.parametrize("""archive_nested_jsonl""" , ["""tar_nested_jsonl_path""", """zip_nested_jsonl_path"""] )
def __lowerCamelCase ( __a :Optional[int] , __a :int ) -> List[Any]:
"""simple docstring"""
A__ = request.getfixturevalue(__lowerCamelCase )
A__ = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(__lowerCamelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(__lowerCamelCase ) , start=1 ):
_test_jsonl(__lowerCamelCase , __lowerCamelCase )
assert num_tar == 1
assert num_jsonl == 2
def __lowerCamelCase ( __a :Dict ) -> Any:
"""simple docstring"""
A__ = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(__lowerCamelCase ) , start=1 ):
assert os.path.basename(__lowerCamelCase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 274
|
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
__UpperCAmelCase = logging.get_logger(__name__)
enable_full_determinism()
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ =UNetaDModel
UpperCAmelCase_ ="sample"
@property
def _UpperCamelCase ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = (32, 32)
SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
SCREAMING_SNAKE_CASE_ = torch.tensor([10] ).to(_A )
return {"sample": noise, "timestep": time_step}
@property
def _UpperCamelCase ( self ) -> List[str]:
return (3, 32, 32)
@property
def _UpperCamelCase ( self ) -> Tuple:
return (3, 32, 32)
def _UpperCamelCase ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = {
'''block_out_channels''': (32, 64),
'''down_block_types''': ('''DownBlock2D''', '''AttnDownBlock2D'''),
'''up_block_types''': ('''AttnUpBlock2D''', '''UpBlock2D'''),
'''attention_head_dim''': 3,
'''out_channels''': 3,
'''in_channels''': 3,
'''layers_per_block''': 2,
'''sample_size''': 32,
}
SCREAMING_SNAKE_CASE_ = self.dummy_input
return init_dict, inputs_dict
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ =UNetaDModel
UpperCAmelCase_ ="sample"
@property
def _UpperCamelCase ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = (32, 32)
SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
SCREAMING_SNAKE_CASE_ = torch.tensor([10] ).to(_A )
return {"sample": noise, "timestep": time_step}
@property
def _UpperCamelCase ( self ) -> Tuple:
return (4, 32, 32)
@property
def _UpperCamelCase ( self ) -> Tuple:
return (4, 32, 32)
def _UpperCamelCase ( self ) -> Tuple:
SCREAMING_SNAKE_CASE_ = {
'''sample_size''': 32,
'''in_channels''': 4,
'''out_channels''': 4,
'''layers_per_block''': 2,
'''block_out_channels''': (32, 64),
'''attention_head_dim''': 32,
'''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''),
'''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''),
}
SCREAMING_SNAKE_CASE_ = self.dummy_input
return init_dict, inputs_dict
def _UpperCamelCase ( self ) -> Tuple:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(_A )
SCREAMING_SNAKE_CASE_ = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
def _UpperCamelCase ( self ) -> Dict:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
model.to(_A )
SCREAMING_SNAKE_CASE_ = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
def _UpperCamelCase ( self ) -> Dict:
        # by default, model loading will use accelerate, as `low_cpu_mem_usage=True`
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
model_accelerate.to(_A )
model_accelerate.eval()
SCREAMING_SNAKE_CASE_ = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
SCREAMING_SNAKE_CASE_ = noise.to(_A )
SCREAMING_SNAKE_CASE_ = torch.tensor([10] * noise.shape[0] ).to(_A )
SCREAMING_SNAKE_CASE_ = model_accelerate(_A , _A )['''sample''']
        # the two models don't need to stay on the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained(
'''fusing/unet-ldm-dummy-update''' , output_loading_info=_A , low_cpu_mem_usage=_A )
model_normal_load.to(_A )
model_normal_load.eval()
SCREAMING_SNAKE_CASE_ = model_normal_load(_A , _A )['''sample''']
assert torch_all_close(_A , _A , rtol=1E-3 )
def _UpperCamelCase ( self ) -> List[str]:
SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' )
model.eval()
model.to(_A )
SCREAMING_SNAKE_CASE_ = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
SCREAMING_SNAKE_CASE_ = noise.to(_A )
SCREAMING_SNAKE_CASE_ = torch.tensor([10] * noise.shape[0] ).to(_A )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(_A , _A ).sample
SCREAMING_SNAKE_CASE_ = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
SCREAMING_SNAKE_CASE_ = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] )
# fmt: on
self.assertTrue(torch_all_close(_A , _A , rtol=1E-3 ) )
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ =UNetaDModel
UpperCAmelCase_ ="sample"
@property
def _UpperCamelCase ( self , _A=(32, 32) ) -> int:
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
SCREAMING_SNAKE_CASE_ = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=_A )
return {"sample": noise, "timestep": time_step}
@property
def _UpperCamelCase ( self ) -> List[str]:
return (3, 32, 32)
@property
def _UpperCamelCase ( self ) -> List[Any]:
return (3, 32, 32)
def _UpperCamelCase ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = {
'''block_out_channels''': [32, 64, 64, 64],
'''in_channels''': 3,
'''layers_per_block''': 1,
'''out_channels''': 3,
'''time_embedding_type''': '''fourier''',
'''norm_eps''': 1E-6,
'''mid_block_scale_factor''': math.sqrt(2.0 ),
'''norm_num_groups''': None,
'''down_block_types''': [
'''SkipDownBlock2D''',
'''AttnSkipDownBlock2D''',
'''SkipDownBlock2D''',
'''SkipDownBlock2D''',
],
'''up_block_types''': [
'''SkipUpBlock2D''',
'''SkipUpBlock2D''',
'''AttnSkipUpBlock2D''',
'''SkipUpBlock2D''',
],
}
SCREAMING_SNAKE_CASE_ = self.dummy_input
return init_dict, inputs_dict
@slow
def _UpperCamelCase ( self ) -> Tuple:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(_A )
SCREAMING_SNAKE_CASE_ = self.dummy_input
SCREAMING_SNAKE_CASE_ = floats_tensor((4, 3) + (256, 256) ).to(_A )
SCREAMING_SNAKE_CASE_ = noise
SCREAMING_SNAKE_CASE_ = model(**_A )
assert image is not None, "Make sure output is not None"
@slow
def _UpperCamelCase ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' )
model.to(_A )
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = (256, 256)
SCREAMING_SNAKE_CASE_ = torch.ones((batch_size, num_channels) + sizes ).to(_A )
SCREAMING_SNAKE_CASE_ = torch.tensor(batch_size * [1E-4] ).to(_A )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(_A , _A ).sample
SCREAMING_SNAKE_CASE_ = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
SCREAMING_SNAKE_CASE_ = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -1_0980.7129, -2_0028.8535, 8148.2822, 2342.2905, 567.7608] )
# fmt: on
self.assertTrue(torch_all_close(_A , _A , rtol=1E-2 ) )
def _UpperCamelCase ( self ) -> int:
SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' )
model.to(_A )
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = (32, 32)
SCREAMING_SNAKE_CASE_ = torch.ones((batch_size, num_channels) + sizes ).to(_A )
SCREAMING_SNAKE_CASE_ = torch.tensor(batch_size * [1E-4] ).to(_A )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(_A , _A ).sample
SCREAMING_SNAKE_CASE_ = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
SCREAMING_SNAKE_CASE_ = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] )
# fmt: on
self.assertTrue(torch_all_close(_A , _A , rtol=1E-2 ) )
def _UpperCamelCase ( self ) -> Dict:
# not required for this model
pass
| 299
| 0
|
"""simple docstring"""
# Logistic Regression from scratch
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))
def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()
def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))
# here alpha is the learning rate, x is the feature matrix and y is the target vector
def logistic_reg(alpha, x, y, max_iterations=70_000):
    theta = np.zeros(x.shape[1])
    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70_000)
    print("theta: ", theta)  # printing the theta i.e. our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
| 148
|
def solution(power: int = 1000) -> int:
    """Return the sum of the digits of 2**power."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
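    # Cross-check sketch (added): the digit-sum loop above should agree with
    # the straightforward string-based computation.
    assert solution(1000) == sum(int(digit) for digit in str(2**1000))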
| 299
| 0
|
'''simple docstring'''
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class _lowercase :
'''simple docstring'''
pass
| 229
|
from ..utils import DummyObject, requires_backends
class UpperCamelCase__ ( metaclass=__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ =["torch", "scipy"]
def __init__( self , *_A , **_A ) -> Tuple:
requires_backends(self , ['''torch''', '''scipy'''] )
@classmethod
def _UpperCamelCase ( cls , *_A , **_A ) -> Any:
requires_backends(cls , ['''torch''', '''scipy'''] )
@classmethod
def _UpperCamelCase ( cls , *_A , **_A ) -> Tuple:
requires_backends(cls , ['''torch''', '''scipy'''] )
| 299
| 0
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowercase_ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_a = ["""input_features""", """is_longer"""]
def __init__( self , lowerCAmelCase=64 , lowerCAmelCase=48_000 , lowerCAmelCase=480 , lowerCAmelCase=10 , lowerCAmelCase=1_024 , lowerCAmelCase=0.0 , lowerCAmelCase=False , lowerCAmelCase = 0 , lowerCAmelCase = 14_000 , lowerCAmelCase = None , lowerCAmelCase = "fusion" , lowerCAmelCase = "repeatpad" , **lowerCAmelCase , ) -> Dict:
'''simple docstring'''
super().__init__(
feature_size=_A , sampling_rate=_A , padding_value=_A , return_attention_mask=_A , **_A , )
_lowercase =top_db
_lowercase =truncation
_lowercase =padding
_lowercase =fft_window_size
_lowercase =(fft_window_size >> 1) + 1
_lowercase =hop_length
_lowercase =max_length_s
_lowercase =max_length_s * sampling_rate
_lowercase =sampling_rate
_lowercase =frequency_min
_lowercase =frequency_max
_lowercase =mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_A , min_frequency=_A , max_frequency=_A , sampling_rate=_A , norm=_A , mel_scale='htk' , )
_lowercase =mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_A , min_frequency=_A , max_frequency=_A , sampling_rate=_A , norm='slaney' , mel_scale='slaney' , )
def A__ ( self ) -> Dict[str, Any]:
'''simple docstring'''
_lowercase =copy.deepcopy(self.__dict__ )
_lowercase =self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ) -> np.ndarray:
'''simple docstring'''
_lowercase =spectrogram(
_A , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=_A , log_mel='dB' , )
return log_mel_spectrogram.T
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
_lowercase =np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
_lowercase =[0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
_lowercase =[0]
# randomly choose index for each part
_lowercase =np.random.choice(ranges[0] )
_lowercase =np.random.choice(ranges[1] )
_lowercase =np.random.choice(ranges[2] )
_lowercase =mel[idx_front : idx_front + chunk_frames, :]
_lowercase =mel[idx_middle : idx_middle + chunk_frames, :]
_lowercase =mel[idx_back : idx_back + chunk_frames, :]
_lowercase =torch.tensor(mel[None, None, :] )
_lowercase =torch.nn.functional.interpolate(
_A , size=[chunk_frames, 64] , mode='bilinear' , align_corners=_A )
_lowercase =mel_shrink[0][0].numpy()
_lowercase =np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> np.array:
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
_lowercase =True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
_lowercase =len(_A ) - max_length
_lowercase =np.random.randint(0 , overflow + 1 )
_lowercase =waveform[idx : idx + max_length]
_lowercase =self._np_extract_fbank_features(_A , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
_lowercase =self._np_extract_fbank_features(_A , self.mel_filters )
                _lowercase =max_length // self.hop_length + 1 # the +1 is related to how the spectrogram is computed
_lowercase =mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
_lowercase =np.stack([mel, mel, mel, mel] , axis=0 )
_lowercase =False
else:
_lowercase =self._random_mel_fusion(_A , _A , _A )
_lowercase =True
else:
raise NotImplementedError(F'''data_truncating {truncation} not implemented''' )
else:
_lowercase =False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
_lowercase =int(max_length / len(_A ) )
_lowercase =np.stack(np.tile(_A , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
_lowercase =int(max_length / len(_A ) )
_lowercase =np.stack(np.tile(_A , _A ) )
_lowercase =np.pad(_A , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 )
if truncation == "fusion":
_lowercase =self._np_extract_fbank_features(_A , self.mel_filters )
_lowercase =np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
_lowercase =self._np_extract_fbank_features(_A , self.mel_filters_slaney )[None, :]
return input_mel, longer
    def __call__(
        self,
        raw_speech,
        truncation=None,
        padding=None,
        max_length=None,
        sampling_rate=None,
        return_tensors=None,
        **kwargs,
    ) -> BatchFeature:
        '''Featurize one or several raw audio sequence(s) into (fused) log-mel input features.'''
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return a batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], list):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
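# ---------------------------------------------------------------------------
# Minimal usage sketch for the feature extractor above (hedged: the public
# class name ClapFeatureExtractor and the 48 kHz sampling rate are assumptions
# based on the CLAP feature extractor in transformers, not taken from this
# file):
#
#     import numpy as np
#     from transformers import ClapFeatureExtractor
#
#     extractor = ClapFeatureExtractor()
#     waveform = np.random.randn(480_000).astype(np.float64)  # ~10 s of audio
#     features = extractor(waveform, sampling_rate=48_000, truncation="fusion",
#                          return_tensors="pt")
#     # features["input_features"] holds the (fused) log-mel spectrograms and
#     # features["is_longer"] flags clips longer than max_length.
# ---------------------------------------------------------------------------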
| 205
|
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values
    def get_config(self):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False
    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 299
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaControlnetImgaImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaControlnetImgaImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32
    @property
    def time_input_dim(self):
        return 32
    @property
    def block_out_channels_a(self):
        return self.time_input_dim
    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4
    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
torch.manual_seed(0 )
        model_kwargs = {
"""in_channels""": 8,
            # out_channels is double the latent channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
        model = UNetaDConditionModel(**model_kwargs)
return model
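        # Note (hedged assumption, based on the diffusers UNet implementation
        # rather than this file): with addition_embed_type="image_hint" the
        # UNet embeds the spatial hint and concatenates it with the latents
        # along the channel axis, which is why in_channels is 8 rather than 4.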
    @property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs)
return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler_kwargs = {
"""num_train_timesteps""": 1_000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00_085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
        scheduler = DDIMScheduler(**scheduler_kwargs)

        components = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
    def test_kandinsky_controlnet_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736]
        )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class KandinskyVaaControlnetImgaImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        init_image = init_image.resize((512, 512))

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        prompt = "A robot, 4k photo"

        pipe_prior = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt,
            image=init_image,
            strength=0.85,
            generator=generator,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            height=512,
            width=512,
            strength=0.5,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
| 347
|
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Check if a number is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Add three fractions and reduce the result to lowest terms."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    total = Fraction(0)

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f"{solution() = }")
| 299
| 0
|
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
logger = logging.get_logger(__name__)


class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
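# Migration sketch (hedged: the checkpoint id "deepmind/vision-perceiver-conv"
# is an illustrative example, not taken from this file):
#
#     from transformers import PerceiverImageProcessor
#
#     processor = PerceiverImageProcessor.from_pretrained("deepmind/vision-perceiver-conv")
#     # ...then use `processor` everywhere PerceiverFeatureExtractor was used before.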
| 231
|
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings for classifier-free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation using VQ Diffusion."""

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

                # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                seq_len = negative_prompt_embeds.shape[1]
                negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
                negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
@torch.no_grad()
    def __call__(
        self,
        prompt,
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator=None,
        latents=None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback=None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0 to"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                # classifier-free guidance: uncond + scale * (cond - uncond),
                # then renormalize the log-probabilities with logsumexp
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """
        Truncate `log_p_x_0` such that for each column vector the kept entries cover at least `truncation_rate` of
        the total probability; the lowest probabilities beyond that threshold are set to zero (i.e. log prob -inf).
        """
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        # scatter the mask back into the original (unsorted) order
        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()

        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
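    # A worked example of what `truncate` does (illustrative numbers, not taken
    # from this file): with per-column probabilities [0.5, 0.3, 0.2] and
    # truncation_rate=0.7, the cumulative sums are [0.5, 0.8, 1.0]; the mask is
    # shifted by one position so the entry that crosses the threshold is kept,
    # leaving [0.5, 0.3] intact and setting the 0.2 entry to log(0) = -inf
    # before the scheduler step renormalizes.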
| 299
| 0
|
import argparse
import json
from tqdm import tqdm
def main() -> None:
    '''Parse raw DPR training data into an evaluation-set file and a gold-data file.'''
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            titles = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(titles) + "\n")
if __name__ == "__main__":
main()
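# Example invocation (hedged: the script and output file names below are
# illustrative, not taken from this file):
#
#     python parse_dpr_relevance_data.py \
#         --src_path biencoder-nq-dev.json \
#         --evaluation_set nq_dev.questions \
#         --gold_data_path nq_dev.gold
#
# This writes one question per line to the evaluation set and the tab-joined
# titles of its positive contexts to the gold file.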
| 232
|
def sum_of_divisors(input_num: int) -> int:
    """
    Return the sum of the proper divisors of ``input_num``.

    >>> sum_of_divisors(6)
    6
    >>> sum_of_divisors(28)
    28
    """
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
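# Note: integers whose proper divisors sum to the number itself are the
# perfect numbers (6, 28, 496, ...), which is what the doctests above verify.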
| 299
| 0
|
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class AutoTokenizerTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
@slow
def UpperCAmelCase ( self ) -> Union[str, Any]:
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
snake_case : List[str] = AutoTokenizer.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(_A ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
snake_case : Dict = AutoTokenizer.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(_A ) , 0 )
def UpperCAmelCase ( self ) -> Tuple:
snake_case : Dict = AutoTokenizer.from_pretrained(_A )
self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def UpperCAmelCase ( self ) -> List[Any]:
snake_case : Tuple = AutoTokenizer.from_pretrained(_A )
self.assertIsInstance(_A , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 2_0 )
def UpperCAmelCase ( self ) -> List[str]:
snake_case : Union[str, Any] = AutoConfig.from_pretrained(_A )
self.assertIsInstance(_A , _A )
# Check that tokenizer_type ≠ model_type
snake_case : Optional[Any] = AutoTokenizer.from_pretrained(_A , config=_A )
self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def UpperCAmelCase ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(_A , """vocab.txt""" ) )
snake_case : int = AutoTokenizer.from_pretrained(_A , tokenizer_type="""bert""" , use_fast=_A )
self.assertIsInstance(_A , _A )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(_A , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(_A , """merges.txt""" ) )
snake_case : int = AutoTokenizer.from_pretrained(_A , tokenizer_type="""gpt2""" , use_fast=_A )
self.assertIsInstance(_A , _A )
@require_tokenizers
def UpperCAmelCase ( self ) -> Any:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(_A , """vocab.txt""" ) )
snake_case : int = AutoTokenizer.from_pretrained(_A , tokenizer_type="""bert""" )
self.assertIsInstance(_A , _A )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(_A , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(_A , """merges.txt""" ) )
snake_case : List[str] = AutoTokenizer.from_pretrained(_A , tokenizer_type="""gpt2""" )
self.assertIsInstance(_A , _A )
def UpperCAmelCase ( self ) -> int:
with pytest.raises(_A ):
AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" )
@require_tokenizers
def UpperCAmelCase ( self ) -> Optional[Any]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
snake_case : Any = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" )
self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) )
if isinstance(_A , _A ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , _A )
else:
self.assertEqual(tokenizer.do_lower_case , _A )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
@require_tokenizers
def UpperCAmelCase ( self ) -> Optional[Any]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
_A , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ):
snake_case : Optional[int] = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" )
def UpperCAmelCase ( self ) -> Optional[Any]:
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
snake_case : List[Any] = TOKENIZER_MAPPING.values()
snake_case : List[str] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(_A )
@require_tokenizers
def UpperCAmelCase ( self ) -> Dict:
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=_A ) , _A )
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , _A )
@require_tokenizers
def UpperCAmelCase ( self ) -> Dict:
snake_case : str = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=_A )
snake_case : Optional[Any] = """Hello, world. How are you?"""
snake_case : List[str] = tokenizer.tokenize(_A )
self.assertEqual("""[UNK]""" , tokens[0] )
snake_case : List[Any] = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=_A )
snake_case : Tuple = tokenizer.tokenize(_A )
self.assertEqual("""[UNK]""" , tokens[0] )
@require_tokenizers
def UpperCAmelCase ( self ) -> int:
snake_case : Optional[int] = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" )
self.assertEqual(type(_A ) , _A )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0 )
self.assertEqual(tokenizer.unk_token , """[UNK]""" )
self.assertEqual(tokenizer.padding_side , """right""" )
self.assertEqual(tokenizer.truncation_side , """right""" )
def UpperCAmelCase ( self ) -> Optional[Any]:
snake_case : Optional[Any] = AutoTokenizer.from_pretrained(_A )
self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_A )
snake_case : Optional[Any] = AutoTokenizer.from_pretrained(_A )
self.assertIsInstance(_A , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 1_2 )
def UpperCAmelCase ( self ) -> List[Any]:
snake_case : List[str] = AutoTokenizer.from_pretrained("""ctrl""" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(_A , _A )
def UpperCAmelCase ( self ) -> str:
# Check we can load the tokenizer config of an online model.
snake_case : Any = get_tokenizer_config("""bert-base-cased""" )
snake_case : Optional[Any] = config.pop("""_commit_hash""" , _A )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(_A , {"""do_lower_case""": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
snake_case : Tuple = get_tokenizer_config(_A )
self.assertDictEqual(_A , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
snake_case : List[str] = AutoTokenizer.from_pretrained(_A )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_A )
snake_case : Tuple = get_tokenizer_config(_A )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" )
def UpperCAmelCase ( self ) -> Tuple:
try:
AutoConfig.register("""custom""" , _A )
AutoTokenizer.register(_A , slow_tokenizer_class=_A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_A ):
AutoTokenizer.register(_A , slow_tokenizer_class=_A )
snake_case : int = CustomTokenizer.from_pretrained(_A )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_A )
snake_case : int = AutoTokenizer.from_pretrained(_A )
self.assertIsInstance(_A , _A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def UpperCAmelCase ( self ) -> List[Any]:
try:
AutoConfig.register("""custom""" , _A )
# Can register in two steps
AutoTokenizer.register(_A , slow_tokenizer_class=_A )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(_A , fast_tokenizer_class=_A )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
_A , slow_tokenizer_class=_A , fast_tokenizer_class=_A )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_A ):
AutoTokenizer.register(_A , fast_tokenizer_class=_A )
            # We pass through a BertTokenizerFast because there is no slow-to-fast converter for our new
            # tokenizer and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case : int = BertTokenizerFast.from_pretrained(_A )
bert_tokenizer.save_pretrained(_A )
snake_case : Any = CustomTokenizerFast.from_pretrained(_A )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_A )
snake_case : Union[str, Any] = AutoTokenizer.from_pretrained(_A )
self.assertIsInstance(_A , _A )
snake_case : Optional[int] = AutoTokenizer.from_pretrained(_A , use_fast=_A )
self.assertIsInstance(_A , _A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase ( self ) -> Optional[Any]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_A ):
snake_case : Union[str, Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_A ):
snake_case : Tuple = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=_A )
snake_case : Union[str, Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=_A )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_A )
snake_case : Tuple = AutoTokenizer.from_pretrained(_A , trust_remote_code=_A )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
snake_case : List[str] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=_A , use_fast=_A )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_A )
snake_case : Union[str, Any] = AutoTokenizer.from_pretrained(_A , trust_remote_code=_A , use_fast=_A )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
@require_tokenizers
def UpperCAmelCase ( self ) -> List[str]:
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewTokenizerFast(BertTokenizerFast):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False
try:
AutoConfig.register("""custom""" , _A )
AutoTokenizer.register(_A , slow_tokenizer_class=_A )
AutoTokenizer.register(_A , fast_tokenizer_class=_A )
            # If remote code is not set, the default is to use the local version
snake_case : int = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
snake_case : Any = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=_A )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
snake_case : List[Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=_A )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
snake_case : Tuple = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=_A , use_fast=_A )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
snake_case : Tuple = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=_A )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertTrue(tokenizer.special_attribute_present )
snake_case : Optional[Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=_A , use_fast=_A )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase ( self ) -> List[str]:
snake_case : int = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=_A )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
snake_case : Any = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=_A , use_fast=_A )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def UpperCAmelCase ( self ) -> List[Any]:
with self.assertRaisesRegex(
_A , """bert-base is not a local folder and is not a valid model identifier""" ):
snake_case : List[Any] = AutoTokenizer.from_pretrained("""bert-base""" )
def UpperCAmelCase ( self ) -> Tuple:
with self.assertRaisesRegex(
_A , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
snake_case : Union[str, Any] = AutoTokenizer.from_pretrained(_A , revision="""aaaaaa""" )
def UpperCAmelCase ( self ) -> Optional[int]:
# Make sure we have cached the tokenizer.
snake_case : Any = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
snake_case : Dict = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
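        # Loading a cached tokenizer should issue exactly one HEAD request (to
        # resolve the remote commit hash) and no GET requests, which is what
        # the three counters above assert.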
| 124
|
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__UpperCAmelCase = "pt"
elif is_tf_available():
__UpperCAmelCase = "tf"
else:
__UpperCAmelCase = "jax"
class ByTaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByTaTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        tokenizer = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def ta_base_tokenizer(self):
        return ByTaTokenizer.from_pretrained("google/byt5-small")
    def get_tokenizer(self, **kwargs) -> ByTaTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_eos_treatment(self):
        tokenizer = self.ta_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])
    def test_multibytes_char(self):
        tokenizer = self.ta_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")
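        # The expected ids above are just the raw utf-8 bytes shifted by 3,
        # since ids 0-2 are reserved for pad/eos/unk: "U" = byte 85 -> id 88,
        # and "€" = bytes E2 82 AC -> ids 229, 133, 175; the trailing 1 is the
        # appended </s> token.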
    def test_prepare_batch_integration(self):
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
SCREAMING_SNAKE_CASE_ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
SCREAMING_SNAKE_CASE_ = tokenizer(_A , padding=_A , return_tensors=_A )
self.assertIsInstance(_A , _A )
if FRAMEWORK != "jax":
SCREAMING_SNAKE_CASE_ = list(batch.input_ids.numpy()[0] )
else:
SCREAMING_SNAKE_CASE_ = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_A , _A )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
    def test_empty_target_text(self):
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
SCREAMING_SNAKE_CASE_ = tokenizer(_A , padding=_A , return_tensors=_A )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , _A )
self.assertIn('''attention_mask''' , _A )
self.assertNotIn('''decoder_input_ids''' , _A )
self.assertNotIn('''decoder_attention_mask''' , _A )
    def test_max_length_integration(self):
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = [
'''Summary of the text.''',
'''Another summary.''',
]
SCREAMING_SNAKE_CASE_ = tokenizer(
text_target=_A , max_length=32 , padding='''max_length''' , truncation=_A , return_tensors=_A )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
    def test_eos_in_input(self):
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = ['''A long paragraph for summarization. </s>''']
SCREAMING_SNAKE_CASE_ = ['''Summary of the text. </s>''']
# fmt: off
SCREAMING_SNAKE_CASE_ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
SCREAMING_SNAKE_CASE_ = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
SCREAMING_SNAKE_CASE_ = tokenizer(_A , text_target=_A )
self.assertEqual(_A , batch['''input_ids'''][0] )
self.assertEqual(_A , batch['''labels'''][0] )
    def test_save_and_load_tokenizer(self):
# safety check on max_len default value so we are sure the test works
SCREAMING_SNAKE_CASE_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
SCREAMING_SNAKE_CASE_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = ''' He is very happy, UNwant\u00E9d,running'''
SCREAMING_SNAKE_CASE_ = tokenizer.encode(_A , add_special_tokens=_A )
tokenizer.save_pretrained(_A )
SCREAMING_SNAKE_CASE_ = tokenizer.__class__.from_pretrained(_A )
SCREAMING_SNAKE_CASE_ = after_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
shutil.rmtree(_A )
SCREAMING_SNAKE_CASE_ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''] )
SCREAMING_SNAKE_CASE_ = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''' )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
SCREAMING_SNAKE_CASE_ = tokenizer.encode(_A , add_special_tokens=_A )
tokenizer.save_pretrained(_A )
SCREAMING_SNAKE_CASE_ = tokenizer.__class__.from_pretrained(_A )
SCREAMING_SNAKE_CASE_ = after_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
SCREAMING_SNAKE_CASE_ = tokenizer.__class__.from_pretrained(_A , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_A )
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
SCREAMING_SNAKE_CASE_ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_A )
with open(os.path.join(_A , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
SCREAMING_SNAKE_CASE_ = json.load(_A )
with open(os.path.join(_A , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
SCREAMING_SNAKE_CASE_ = json.load(_A )
SCREAMING_SNAKE_CASE_ = [F'''<extra_id_{i}>''' for i in range(125 )]
SCREAMING_SNAKE_CASE_ = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
SCREAMING_SNAKE_CASE_ = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(_A , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(_A , _A )
with open(os.path.join(_A , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(_A , _A )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
SCREAMING_SNAKE_CASE_ = tokenizer_class.from_pretrained(
_A , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
SCREAMING_SNAKE_CASE_ = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=_A )]
SCREAMING_SNAKE_CASE_ = tokenizer_class.from_pretrained(
_A , additional_special_tokens=_A , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                tokenizer = tokenizer_class.from_pretrained(tmp_dir)
                self.assertTrue(tokenizer.decode([255]) == "")
    # the tokenizer can be instantiated without any pretrained files, so no need for a pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # the tokenizer does not have a vocabulary file
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized, since ids depend on the whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in the vocab, which is unnecessary without a vocab file
    def test_conversion_reversible(self):
        pass
    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
        # and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPT2Config, T5Config, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
    from transformers import (
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelForTableQuestionAnswering,
        TFAutoModelForTokenClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFFunnelBaseModel,
        TFFunnelModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
        TFTapasForQuestionAnswering,
    )
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig


@require_tf
class TFAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModelForPreTraining.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

    @slow
    @require_tensorflow_probability
    def test_table_question_answering_model_from_pretrained(self):
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TapasConfig)

            model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_name)
            model, loading_info = TFAutoModelForTableQuestionAnswering.from_pretrained(
                model_name, output_loading_info=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTapasForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
    def test_from_pretrained_with_tuple_values(self):
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        model = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny")
        self.assertIsInstance(model, TFFunnelModel)

        config = copy.deepcopy(model.config)
        config.architectures = ["FunnelBaseModel"]
        model = TFAutoModel.from_config(config)
        self.assertIsInstance(model, TFFunnelBaseModel)

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            model = TFAutoModel.from_pretrained(tmp_dir)
            self.assertIsInstance(model, TFFunnelBaseModel)
    def test_new_model_registration(self):
        try:
            AutoConfig.register("new-model", NewModelConfig)

            auto_classes = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]

            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__):
                    # Wrong config class will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFNewModel)
                    auto_class.register(NewModelConfig, TFNewModel)
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFBertModel)

                    # Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self).get_config()
                    config = NewModelConfig(**tiny_config.to_dict())
                    model = auto_class.from_config(config)
                    self.assertIsInstance(model, TFNewModel)

                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir)
                        new_model = auto_class.from_pretrained(tmp_dir)
                        self.assertIsInstance(new_model, TFNewModel)
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = TFAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin",
        ):
            _ = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
    def test_cached_model_has_minimum_calls_to_head(self):
        # Make sure we have cached the model.
        _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)

        # With a sharded checkpoint
        _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img
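

# Quick sanity check of the per-pixel arithmetic (an illustrative addition, not
# part of the original script): the negative of channel value v is 255 - v,
# computed here via NumPy broadcasting, exactly as in the loop above.
import numpy as np

assert ([255, 255, 255] - np.array([10, 200, 55])).tolist() == [245, 55, 200]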
if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    img = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)

    waitKey(0)
    destroyAllWindows()
__version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
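
# Each guarded block below follows the same pattern: probe for an optional
# backend, and if it is missing import placeholder "dummy" objects instead,
# which raise a helpful error only when actually instantiated.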
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
        ShapEPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
        StableUnCLIPPipeline,
        TextToVideoSDPipeline,
        TextToVideoZeroPipeline,
        UnCLIPImageVariationPipeline,
        UnCLIPPipeline,
        UniDiffuserModel,
        UniDiffuserPipeline,
        UniDiffuserTextDecoder,
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
        VideoToVideoSDPipeline,
        VQDiffusionPipeline,
    )
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
import math


def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)

    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0

    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate)
        integer += 1
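

# Spot checks (an illustrative addition, not in the original): numbers of the
# form n = 2**e * (2**e - 1) give sqrt(4*n + 1) = 2**(e + 1) - 1, so the
# computed exponent is exactly e and the partition counts as perfect.
assert check_partition_perfect(2) and check_partition_perfect(12) and check_partition_perfect(56)
assert not check_partition_perfect(7)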
if __name__ == "__main__":
print(F"""{solution() = }""")
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=8,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_pix2pix_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2

        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)

        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]

        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in slice]))

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_latents_input(self):
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPix2PixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]

        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")

        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()

        out_latents_inputs = pipe(**inputs)[0]

        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")
@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
        )
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_pix2pix_pipeline_default(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_pipeline_k_lms(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_pipeline_ddim(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))

        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        output = pipe(**inputs)
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                # the "(" guard keeps an open parenthesis from being compared,
                # which would otherwise raise a KeyError
                while len(stack) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str
def infix_2_prefix(infix):
    reversed_infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(reversed_infix)):
        if reversed_infix[i] == "(":
            reversed_infix[i] = ")"  # change "(" to ")"
        elif reversed_infix[i] == ")":
            reversed_infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(reversed_infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
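

# Worked example (an illustrative addition, not in the original script): for
# "a+b*(c^d-e)" the postfix form is "abcd^e-*+" and the prefix form "+a*b-^cde";
# the parenthesised exponent binds tightest, then the subtraction, the
# multiplication, and finally the addition. Calling the function also prints
# its step-by-step trace table.
assert infix_2_prefix("a+b*(c^d-e)") == "+a*b-^cde"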
if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid
def f(x: float) -> float:
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_000))
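    # Illustrative check (an addition, not in the original): the real root of
    # x**3 - 2*x - 5 is ~2.0945515, so the result of the 1e-7 stopping rule
    # should easily agree to within 1e-6.
    assert abs(bisection(f, 1, 1_000) - 2.0945514815423265) < 1e-6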
import doctest
doctest.testmod()
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch

import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput


@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the ScoreSdeVeScheduler's step functions."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """The variance-exploding stochastic differential equation (SDE) scheduler."""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(
        self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None
    ):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        """Predictor step: propagate the sample with the reverse SDE."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        """Corrector step: nudge the sample along the score before the next predictor step."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas have the same device as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
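

# Minimal usage sketch (an illustrative addition, not part of the scheduler
# itself; because of the package-relative imports above it only runs when the
# module is executed inside the package, e.g. with `python -m`). A random
# tensor stands in for a trained score network, so the predictor-corrector
# loop below is merely a smoke test, not a meaningful sampler.
if __name__ == "__main__":
    scheduler = ScoreSdeVeScheduler()
    scheduler.set_timesteps(num_inference_steps=10)
    scheduler.set_sigmas(num_inference_steps=10)

    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        fake_score = torch.randn_like(sample)  # stand-in for model(sample, t)
        for _ in range(scheduler.config.correct_steps):
            sample = scheduler.step_correct(fake_score, sample).prev_sample
        sample = scheduler.step_pred(fake_score, t, sample).prev_sample
    print(sample.shape)  # torch.Size([1, 3, 8, 8])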